coalesced_mmio.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr, len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	/* reject an (addr, len) pair that wraps around the address space */
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
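
/*
 * Worked example (illustrative values, not from this file): for a zone at
 * 0xa0000 with size 0x20000, the zone ends at 0xc0000, so addr = 0xbfff8
 * with len = 8 passes while len = 9 fails the final check.  A wrapping
 * access such as addr = 0xfffffffffffffffc with len = 8 overflows to 0x4,
 * and the addr + len < addr test rejects it.
 */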
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* last is the first free entry;
	 * check that we don't meet the first used entry.
	 * There is always one unused entry in the buffer.
	 * The caller passes a validated snapshot of ring->last, since the
	 * ring page is writable by userspace.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}
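
/*
 * Worked example (illustrative): the ring intentionally keeps one slot
 * unused so that first == last can only mean "empty".  With first == 5
 * and last == 4, avail = (5 - 4 - 1) % KVM_COALESCED_MMIO_MAX = 0: slot 4
 * is the gap entry, the ring is full, and the write below falls back to a
 * normal MMIO exit instead.
 */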
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * ring->last lives in a page shared with userspace, so read it
	 * once and bounds-check it before using it as an array index.
	 */
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
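
/*
 * Consumer side, a minimal sketch (not part of this file; the
 * handle_write() helper is hypothetical): after a KVM_RUN exit, userspace
 * replays queued entries and releases each slot by advancing ring->first,
 * with a barrier so a slot is only recycled once its payload has been
 * consumed:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *
 *		handle_write(ent->pio, ent->phys_addr, ent->data, ent->len);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */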
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}
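
/*
 * Userspace side, a minimal sketch (not part of this file): the page
 * allocated above is exposed through the vCPU mmap region, and
 * KVM_CHECK_EXTENSION(KVM_CAP_COALESCED_MMIO) returns its page offset
 * (KVM_COALESCED_MMIO_PAGE_OFFSET).  Assuming kvm_fd, vcpu_fd and
 * mmap_size (from KVM_GET_VCPU_MMAP_SIZE) were obtained the usual way:
 *
 *	int off = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	struct kvm_coalesced_mmio_ring *ring =
 *		(void *)run + off * PAGE_SIZE;
 */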
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				      zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				      zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);
	return ret;
}
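
/*
 * Registration from userspace, a minimal sketch (not part of this file;
 * the VGA-style window below is purely illustrative):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xa0000,
 *		.size = 0x20000,
 *		.pio  = 0,
 *	};
 *
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * Writes the guest makes inside the zone are then queued in the ring
 * instead of causing an immediate exit to userspace.
 */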
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
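
/*
 * Unregistration, a minimal sketch (not part of this file): passing the
 * same zone back removes it.  Because of the in_range() test above, any
 * registered zone that fully contains the requested (addr, size) range
 * is torn down, not just an exact match.
 *
 *	ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
 */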