/*
 * VFIO-KVM bridge pseudo device
 *
 * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
/*
 * One entry per VFIO group attached to this KVM device, linked into
 * struct kvm_vfio::group_list.
 */
struct kvm_vfio_group {
	struct list_head node;		/* entry in kvm_vfio::group_list */
	struct vfio_group *vfio_group;	/* ref held via vfio_group_get_external_user() */
};
/*
 * Per-device private state for the kvm-vfio pseudo device; stored in
 * kvm_device::private.
 */
struct kvm_vfio {
	struct list_head group_list;	/* list of struct kvm_vfio_group */
	struct mutex lock;		/* protects group_list and noncoherent */
	bool noncoherent;		/* true while noncoherent DMA is registered */
};
  29. static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
  30. {
  31. struct vfio_group *vfio_group;
  32. struct vfio_group *(*fn)(struct file *);
  33. fn = symbol_get(vfio_group_get_external_user);
  34. if (!fn)
  35. return ERR_PTR(-EINVAL);
  36. vfio_group = fn(filep);
  37. symbol_put(vfio_group_get_external_user);
  38. return vfio_group;
  39. }
  40. static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
  41. {
  42. void (*fn)(struct vfio_group *);
  43. fn = symbol_get(vfio_group_put_external_user);
  44. if (!fn)
  45. return;
  46. fn(vfio_group);
  47. symbol_put(vfio_group_put_external_user);
  48. }
  49. static bool kvm_vfio_group_is_coherent(struct vfio_group *vfio_group)
  50. {
  51. long (*fn)(struct vfio_group *, unsigned long);
  52. long ret;
  53. fn = symbol_get(vfio_external_check_extension);
  54. if (!fn)
  55. return false;
  56. ret = fn(vfio_group, VFIO_DMA_CC_IOMMU);
  57. symbol_put(vfio_external_check_extension);
  58. return ret > 0;
  59. }
  60. /*
  61. * Groups can use the same or different IOMMU domains. If the same then
  62. * adding a new group may change the coherency of groups we've previously
  63. * been told about. We don't want to care about any of that so we retest
  64. * each group and bail as soon as we find one that's noncoherent. This
  65. * means we only ever [un]register_noncoherent_dma once for the whole device.
  66. */
  67. static void kvm_vfio_update_coherency(struct kvm_device *dev)
  68. {
  69. struct kvm_vfio *kv = dev->private;
  70. bool noncoherent = false;
  71. struct kvm_vfio_group *kvg;
  72. mutex_lock(&kv->lock);
  73. list_for_each_entry(kvg, &kv->group_list, node) {
  74. if (!kvm_vfio_group_is_coherent(kvg->vfio_group)) {
  75. noncoherent = true;
  76. break;
  77. }
  78. }
  79. if (noncoherent != kv->noncoherent) {
  80. kv->noncoherent = noncoherent;
  81. if (kv->noncoherent)
  82. kvm_arch_register_noncoherent_dma(dev->kvm);
  83. else
  84. kvm_arch_unregister_noncoherent_dma(dev->kvm);
  85. }
  86. mutex_unlock(&kv->lock);
  87. }
  88. static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
  89. {
  90. struct kvm_vfio *kv = dev->private;
  91. struct vfio_group *vfio_group;
  92. struct kvm_vfio_group *kvg;
  93. int32_t __user *argp = (int32_t __user *)(unsigned long)arg;
  94. struct fd f;
  95. int32_t fd;
  96. int ret;
  97. switch (attr) {
  98. case KVM_DEV_VFIO_GROUP_ADD:
  99. if (get_user(fd, argp))
  100. return -EFAULT;
  101. f = fdget(fd);
  102. if (!f.file)
  103. return -EBADF;
  104. vfio_group = kvm_vfio_group_get_external_user(f.file);
  105. fdput(f);
  106. if (IS_ERR(vfio_group))
  107. return PTR_ERR(vfio_group);
  108. mutex_lock(&kv->lock);
  109. list_for_each_entry(kvg, &kv->group_list, node) {
  110. if (kvg->vfio_group == vfio_group) {
  111. mutex_unlock(&kv->lock);
  112. kvm_vfio_group_put_external_user(vfio_group);
  113. return -EEXIST;
  114. }
  115. }
  116. kvg = kzalloc(sizeof(*kvg), GFP_KERNEL);
  117. if (!kvg) {
  118. mutex_unlock(&kv->lock);
  119. kvm_vfio_group_put_external_user(vfio_group);
  120. return -ENOMEM;
  121. }
  122. list_add_tail(&kvg->node, &kv->group_list);
  123. kvg->vfio_group = vfio_group;
  124. mutex_unlock(&kv->lock);
  125. kvm_vfio_update_coherency(dev);
  126. return 0;
  127. case KVM_DEV_VFIO_GROUP_DEL:
  128. if (get_user(fd, argp))
  129. return -EFAULT;
  130. f = fdget(fd);
  131. if (!f.file)
  132. return -EBADF;
  133. vfio_group = kvm_vfio_group_get_external_user(f.file);
  134. fdput(f);
  135. if (IS_ERR(vfio_group))
  136. return PTR_ERR(vfio_group);
  137. ret = -ENOENT;
  138. mutex_lock(&kv->lock);
  139. list_for_each_entry(kvg, &kv->group_list, node) {
  140. if (kvg->vfio_group != vfio_group)
  141. continue;
  142. list_del(&kvg->node);
  143. kvm_vfio_group_put_external_user(kvg->vfio_group);
  144. kfree(kvg);
  145. ret = 0;
  146. break;
  147. }
  148. mutex_unlock(&kv->lock);
  149. kvm_vfio_group_put_external_user(vfio_group);
  150. kvm_vfio_update_coherency(dev);
  151. return ret;
  152. }
  153. return -ENXIO;
  154. }
  155. static int kvm_vfio_set_attr(struct kvm_device *dev,
  156. struct kvm_device_attr *attr)
  157. {
  158. switch (attr->group) {
  159. case KVM_DEV_VFIO_GROUP:
  160. return kvm_vfio_set_group(dev, attr->attr, attr->addr);
  161. }
  162. return -ENXIO;
  163. }
  164. static int kvm_vfio_has_attr(struct kvm_device *dev,
  165. struct kvm_device_attr *attr)
  166. {
  167. switch (attr->group) {
  168. case KVM_DEV_VFIO_GROUP:
  169. switch (attr->attr) {
  170. case KVM_DEV_VFIO_GROUP_ADD:
  171. case KVM_DEV_VFIO_GROUP_DEL:
  172. return 0;
  173. }
  174. break;
  175. }
  176. return -ENXIO;
  177. }
  178. static void kvm_vfio_destroy(struct kvm_device *dev)
  179. {
  180. struct kvm_vfio *kv = dev->private;
  181. struct kvm_vfio_group *kvg, *tmp;
  182. list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
  183. kvm_vfio_group_put_external_user(kvg->vfio_group);
  184. list_del(&kvg->node);
  185. kfree(kvg);
  186. }
  187. kvm_vfio_update_coherency(dev);
  188. kfree(kv);
  189. kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
  190. }
  191. static int kvm_vfio_create(struct kvm_device *dev, u32 type)
  192. {
  193. struct kvm_device *tmp;
  194. struct kvm_vfio *kv;
  195. /* Only one VFIO "device" per VM */
  196. list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
  197. if (tmp->ops == &kvm_vfio_ops)
  198. return -EBUSY;
  199. kv = kzalloc(sizeof(*kv), GFP_KERNEL);
  200. if (!kv)
  201. return -ENOMEM;
  202. INIT_LIST_HEAD(&kv->group_list);
  203. mutex_init(&kv->lock);
  204. dev->private = kv;
  205. return 0;
  206. }
/* Ops table registered with the KVM device framework for "kvm-vfio". */
struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	.destroy = kvm_vfio_destroy,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};