privcmd-buf.c — mmap of Xen hypercall buffers.
  1. // SPDX-License-Identifier: GPL-2.0 OR MIT
  2. /******************************************************************************
  3. * privcmd-buf.c
  4. *
  5. * Mmap of hypercall buffers.
  6. *
  7. * Copyright (c) 2018 Juergen Gross
  8. */
  9. #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/module.h>
  12. #include <linux/list.h>
  13. #include <linux/miscdevice.h>
  14. #include <linux/mm.h>
  15. #include <linux/slab.h>
  16. #include "privcmd.h"
MODULE_LICENSE("GPL");

/*
 * Per-open-file cap on the number of buffer pages privcmd_buf_mmap() may
 * allocate.  Runtime-tunable (mode 0644) via the module parameter.
 */
static unsigned int limit = 64;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
			"the privcmd-buf device per open file");
/* Per-open-file state: tracks every buffer mapping created via this fd. */
struct privcmd_buf_private {
	struct mutex lock;	/* protects "list" and "allocated" */
	struct list_head list;	/* list of privcmd_buf_vma_private */
	unsigned int allocated;	/* pages counted against "limit" */
};
/*
 * Per-mapping state; shared by all vmas cloned from the original mmap()
 * (see "users") and freed when the last one is closed.
 */
struct privcmd_buf_vma_private {
	struct privcmd_buf_private *file_priv;	/* owning open file */
	struct list_head list;			/* entry in file_priv->list */
	unsigned int users;			/* vmas referencing this */
	unsigned int n_pages;			/* length of pages[] */
	struct page *pages[];			/* backing pages (flexible array) */
};
  34. static int privcmd_buf_open(struct inode *ino, struct file *file)
  35. {
  36. struct privcmd_buf_private *file_priv;
  37. file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
  38. if (!file_priv)
  39. return -ENOMEM;
  40. mutex_init(&file_priv->lock);
  41. INIT_LIST_HEAD(&file_priv->list);
  42. file->private_data = file_priv;
  43. return 0;
  44. }
  45. static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
  46. {
  47. unsigned int i;
  48. vma_priv->file_priv->allocated -= vma_priv->n_pages;
  49. list_del(&vma_priv->list);
  50. for (i = 0; i < vma_priv->n_pages; i++)
  51. if (vma_priv->pages[i])
  52. __free_page(vma_priv->pages[i]);
  53. kfree(vma_priv);
  54. }
  55. static int privcmd_buf_release(struct inode *ino, struct file *file)
  56. {
  57. struct privcmd_buf_private *file_priv = file->private_data;
  58. struct privcmd_buf_vma_private *vma_priv;
  59. mutex_lock(&file_priv->lock);
  60. while (!list_empty(&file_priv->list)) {
  61. vma_priv = list_first_entry(&file_priv->list,
  62. struct privcmd_buf_vma_private,
  63. list);
  64. privcmd_buf_vmapriv_free(vma_priv);
  65. }
  66. mutex_unlock(&file_priv->lock);
  67. kfree(file_priv);
  68. return 0;
  69. }
  70. static void privcmd_buf_vma_open(struct vm_area_struct *vma)
  71. {
  72. struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
  73. if (!vma_priv)
  74. return;
  75. mutex_lock(&vma_priv->file_priv->lock);
  76. vma_priv->users++;
  77. mutex_unlock(&vma_priv->file_priv->lock);
  78. }
  79. static void privcmd_buf_vma_close(struct vm_area_struct *vma)
  80. {
  81. struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
  82. struct privcmd_buf_private *file_priv;
  83. if (!vma_priv)
  84. return;
  85. file_priv = vma_priv->file_priv;
  86. mutex_lock(&file_priv->lock);
  87. vma_priv->users--;
  88. if (!vma_priv->users)
  89. privcmd_buf_vmapriv_free(vma_priv);
  90. mutex_unlock(&file_priv->lock);
  91. }
  92. static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
  93. {
  94. pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
  95. vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
  96. vmf->pgoff, (void *)vmf->address);
  97. return VM_FAULT_SIGBUS;
  98. }
/* VMA callbacks: reference counting on open/close, SIGBUS on faults. */
static const struct vm_operations_struct privcmd_buf_vm_ops = {
	.open = privcmd_buf_vma_open,
	.close = privcmd_buf_vma_close,
	.fault = privcmd_buf_vma_fault,
};
  104. static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
  105. {
  106. struct privcmd_buf_private *file_priv = file->private_data;
  107. struct privcmd_buf_vma_private *vma_priv;
  108. unsigned long count = vma_pages(vma);
  109. unsigned int i;
  110. int ret = 0;
  111. if (!(vma->vm_flags & VM_SHARED) || count > limit ||
  112. file_priv->allocated + count > limit)
  113. return -EINVAL;
  114. vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
  115. GFP_KERNEL);
  116. if (!vma_priv)
  117. return -ENOMEM;
  118. vma_priv->n_pages = count;
  119. count = 0;
  120. for (i = 0; i < vma_priv->n_pages; i++) {
  121. vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
  122. if (!vma_priv->pages[i])
  123. break;
  124. count++;
  125. }
  126. mutex_lock(&file_priv->lock);
  127. file_priv->allocated += count;
  128. vma_priv->file_priv = file_priv;
  129. vma_priv->users = 1;
  130. vma->vm_flags |= VM_IO | VM_DONTEXPAND;
  131. vma->vm_ops = &privcmd_buf_vm_ops;
  132. vma->vm_private_data = vma_priv;
  133. list_add(&vma_priv->list, &file_priv->list);
  134. if (vma_priv->n_pages != count)
  135. ret = -ENOMEM;
  136. else
  137. for (i = 0; i < vma_priv->n_pages; i++) {
  138. ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
  139. vma_priv->pages[i]);
  140. if (ret)
  141. break;
  142. }
  143. if (ret)
  144. privcmd_buf_vmapriv_free(vma_priv);
  145. mutex_unlock(&file_priv->lock);
  146. return ret;
  147. }
/* File operations for the privcmd-buf device; exported for privcmd.c. */
const struct file_operations xen_privcmdbuf_fops = {
	.owner = THIS_MODULE,
	.open = privcmd_buf_open,
	.release = privcmd_buf_release,
	.mmap = privcmd_buf_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
/* Misc device node "xen/hypercall" with a dynamically assigned minor. */
struct miscdevice xen_privcmdbuf_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/hypercall",
	.fops = &xen_privcmdbuf_fops,
};