  1. /*
  2. * Virtio PCI driver - legacy device support
  3. *
  4. * This module allows virtio devices to be used over a virtual PCI device.
  5. * This can be used with QEMU based VMMs like KVM or Xen.
  6. *
  7. * Copyright IBM Corp. 2007
  8. * Copyright Red Hat, Inc. 2014
  9. *
  10. * Authors:
  11. * Anthony Liguori <aliguori@us.ibm.com>
  12. * Rusty Russell <rusty@rustcorp.com.au>
  13. * Michael S. Tsirkin <mst@redhat.com>
  14. *
  15. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  16. * See the COPYING file in the top-level directory.
  17. *
  18. */
  19. #include "virtio_pci_common.h"
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	/* Claim every device of vendor 0x1af4; probe() narrows the range. */
	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
  26. /* virtio config->get_features() implementation */
  27. static u64 vp_get_features(struct virtio_device *vdev)
  28. {
  29. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  30. /* When someone needs more than 32 feature bits, we'll need to
  31. * steal a bit to indicate that the rest are somewhere else. */
  32. return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
  33. }
  34. /* virtio config->finalize_features() implementation */
  35. static int vp_finalize_features(struct virtio_device *vdev)
  36. {
  37. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  38. /* Give virtio_ring a chance to accept features. */
  39. vring_transport_features(vdev);
  40. /* Make sure we don't have any features > 32 bits! */
  41. BUG_ON((u32)vdev->features != vdev->features);
  42. /* We only support 32 feature bits. */
  43. iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
  44. return 0;
  45. }
  46. /* virtio config->get() implementation */
  47. static void vp_get(struct virtio_device *vdev, unsigned offset,
  48. void *buf, unsigned len)
  49. {
  50. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  51. void __iomem *ioaddr = vp_dev->ioaddr +
  52. VIRTIO_PCI_CONFIG(vp_dev) + offset;
  53. u8 *ptr = buf;
  54. int i;
  55. for (i = 0; i < len; i++)
  56. ptr[i] = ioread8(ioaddr + i);
  57. }
  58. /* the config->set() implementation. it's symmetric to the config->get()
  59. * implementation */
  60. static void vp_set(struct virtio_device *vdev, unsigned offset,
  61. const void *buf, unsigned len)
  62. {
  63. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  64. void __iomem *ioaddr = vp_dev->ioaddr +
  65. VIRTIO_PCI_CONFIG(vp_dev) + offset;
  66. const u8 *ptr = buf;
  67. int i;
  68. for (i = 0; i < len; i++)
  69. iowrite8(ptr[i], ioaddr + i);
  70. }
  71. /* config->{get,set}_status() implementations */
  72. static u8 vp_get_status(struct virtio_device *vdev)
  73. {
  74. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  75. return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
  76. }
  77. static void vp_set_status(struct virtio_device *vdev, u8 status)
  78. {
  79. struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  80. /* We should never be setting status to 0. */
  81. BUG_ON(status == 0);
  82. iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
  83. }
/* config->reset() implementation: writing 0 to the status register
 * resets the device; the follow-up read flushes that write before we
 * drain any callbacks still in flight.
 */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* 0 status means a reset. */
	iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any. */
	ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
  95. static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
  96. {
  97. /* Setup the vector used for configuration events */
  98. iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
  99. /* Verify we had enough resources to assign the vector */
  100. /* Will also flush the write out to device */
  101. return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
  102. }
  103. static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
  104. struct virtio_pci_vq_info *info,
  105. unsigned index,
  106. void (*callback)(struct virtqueue *vq),
  107. const char *name,
  108. u16 msix_vec)
  109. {
  110. struct virtqueue *vq;
  111. unsigned long size;
  112. u16 num;
  113. int err;
  114. /* Select the queue we're interested in */
  115. iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
  116. /* Check if queue is either not available or already active. */
  117. num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
  118. if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
  119. return ERR_PTR(-ENOENT);
  120. info->num = num;
  121. info->msix_vector = msix_vec;
  122. size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
  123. info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
  124. if (info->queue == NULL)
  125. return ERR_PTR(-ENOMEM);
  126. /* activate the queue */
  127. iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
  128. vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
  129. /* create the vring */
  130. vq = vring_new_virtqueue(index, info->num,
  131. VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
  132. true, info->queue, vp_notify, callback, name);
  133. if (!vq) {
  134. err = -ENOMEM;
  135. goto out_activate_queue;
  136. }
  137. vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
  138. if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
  139. iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
  140. msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
  141. if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
  142. err = -EBUSY;
  143. goto out_assign;
  144. }
  145. }
  146. return vq;
  147. out_assign:
  148. vring_del_virtqueue(vq);
  149. out_activate_queue:
  150. iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
  151. free_pages_exact(info->queue, size);
  152. return ERR_PTR(err);
  153. }
/* Tear down one legacy virtqueue: unbind its MSI-X vector (if MSI-X is
 * in use), delete the vring, deactivate the queue on the device, and
 * free the ring pages allocated in setup_vq().
 */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	unsigned long size;

	/* Select the queue before touching its per-queue registers. */
	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* Same size computation as in setup_vq(). */
	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
}
/* Legacy-transport implementation of the virtio config operations.
 * The vp_* register accessors are defined above; the vq/bus/affinity
 * helpers are shared with the modern transport via virtio_pci_common. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get = vp_get,
	.set = vp_set,
	.get_status = vp_get_status,
	.set_status = vp_set_status,
	.reset = vp_reset,
	.find_vqs = vp_find_vqs,
	.del_vqs = vp_del_vqs,
	.get_features = vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name = vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
};
  185. /* the PCI probing function */
  186. static int virtio_pci_probe(struct pci_dev *pci_dev,
  187. const struct pci_device_id *id)
  188. {
  189. struct virtio_pci_device *vp_dev;
  190. int err;
  191. /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
  192. if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
  193. return -ENODEV;
  194. if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
  195. printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
  196. VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
  197. return -ENODEV;
  198. }
  199. /* allocate our structure and fill it out */
  200. vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
  201. if (vp_dev == NULL)
  202. return -ENOMEM;
  203. vp_dev->vdev.dev.parent = &pci_dev->dev;
  204. vp_dev->vdev.dev.release = virtio_pci_release_dev;
  205. vp_dev->vdev.config = &virtio_pci_config_ops;
  206. vp_dev->pci_dev = pci_dev;
  207. INIT_LIST_HEAD(&vp_dev->virtqueues);
  208. spin_lock_init(&vp_dev->lock);
  209. /* Disable MSI/MSIX to bring device to a known good state. */
  210. pci_msi_off(pci_dev);
  211. /* enable the device */
  212. err = pci_enable_device(pci_dev);
  213. if (err)
  214. goto out;
  215. err = pci_request_regions(pci_dev, "virtio-pci");
  216. if (err)
  217. goto out_enable_device;
  218. vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
  219. if (vp_dev->ioaddr == NULL) {
  220. err = -ENOMEM;
  221. goto out_req_regions;
  222. }
  223. vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
  224. pci_set_drvdata(pci_dev, vp_dev);
  225. pci_set_master(pci_dev);
  226. /* we use the subsystem vendor/device id as the virtio vendor/device
  227. * id. this allows us to use the same PCI vendor/device id for all
  228. * virtio devices and to identify the particular virtio driver by
  229. * the subsystem ids */
  230. vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
  231. vp_dev->vdev.id.device = pci_dev->subsystem_device;
  232. vp_dev->config_vector = vp_config_vector;
  233. vp_dev->setup_vq = setup_vq;
  234. vp_dev->del_vq = del_vq;
  235. /* finally register the virtio device */
  236. err = register_virtio_device(&vp_dev->vdev);
  237. if (err)
  238. goto out_set_drvdata;
  239. return 0;
  240. out_set_drvdata:
  241. pci_iounmap(pci_dev, vp_dev->ioaddr);
  242. out_req_regions:
  243. pci_release_regions(pci_dev);
  244. out_enable_device:
  245. pci_disable_device(pci_dev);
  246. out:
  247. kfree(vp_dev);
  248. return err;
  249. }
/* PCI remove callback: unwind everything probe() set up, in reverse
 * order. The virtio device is unregistered first so no driver is still
 * using the vqs or register window when they are torn down.
 */
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);
	vp_del_vqs(&vp_dev->vdev);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}
/* PCI driver glue: module_pci_driver() generates the module init/exit
 * that registers/unregisters this driver with the PCI core. */
static struct pci_driver virtio_pci_driver = {
	.name = "virtio-pci",
	.id_table = virtio_pci_id_table,
	.probe = virtio_pci_probe,
	.remove = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	/* Suspend/resume ops shared with the common virtio-pci code. */
	.driver.pm = &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);