/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
              "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
                 "the gntdev device");
static atomic_t pages_mapped = ATOMIC_INIT(0);
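/*
 * use_ptemod is set once at init time (see gntdev_init()): it is true for PV
 * domains, where grants are mapped by rewriting the page table entries of
 * the target VMA, and false for auto-translated (e.g. HVM) domains, where
 * granted frames are instead mapped onto ballooned-out pages.
 */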
static int use_ptemod;
#define populate_freeable_maps use_ptemod

struct gntdev_priv {
        /* maps with visible offsets in the file descriptor */
        struct list_head maps;
        /* maps that are not visible; will be freed on munmap.
         * Only populated if populate_freeable_maps == 1 */
        struct list_head freeable_maps;
        /* lock protects maps and freeable_maps */
        struct mutex lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
};

struct unmap_notify {
        int flags;
        /* Address relative to the start of the grant_map */
        int addr;
        int event;
};
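/*
 * A grant_map tracks one mapped range of grant references.  The grants,
 * map_ops, unmap_ops, kmap_ops, kunmap_ops and pages arrays are parallel,
 * one entry per page, all allocated with length 'count' (see
 * gntdev_alloc_map()).
 */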
struct grant_map {
        struct list_head next;
        struct vm_area_struct *vma;
        int index;
        int count;
        int flags;
        atomic_t users;
        struct unmap_notify notify;
        struct ioctl_gntdev_grant_ref *grants;
        struct gnttab_map_grant_ref *map_ops;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_map_grant_ref *kmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned long pages_vm_start;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
                              char *text, int text_index)
{
#ifdef DEBUG
        struct grant_map *map;

        pr_debug("%s: maps list (priv %p)\n", __func__, priv);
        list_for_each_entry(map, &priv->maps, next)
                pr_debug(" index %2d, count %2d %s\n",
                         map->index, map->count,
                         map->index == text_index && text ? text : "");
#endif
}
static void gntdev_free_map(struct grant_map *map)
{
        if (map == NULL)
                return;

        if (map->pages)
                gnttab_free_pages(map->count, map->pages);
        kfree(map->pages);
        kfree(map->grants);
        kfree(map->map_ops);
        kfree(map->unmap_ops);
        kfree(map->kmap_ops);
        kfree(map->kunmap_ops);
        kfree(map);
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
        struct grant_map *add;
        int i;

        add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
        if (NULL == add)
                return NULL;

        add->grants = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
        add->map_ops = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
        add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
        add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
        add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
        add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
        if (NULL == add->grants ||
            NULL == add->map_ops ||
            NULL == add->unmap_ops ||
            NULL == add->kmap_ops ||
            NULL == add->kunmap_ops ||
            NULL == add->pages)
                goto err;

        if (gnttab_alloc_pages(count, add->pages))
                goto err;

        for (i = 0; i < count; i++) {
                add->map_ops[i].handle = -1;
                add->unmap_ops[i].handle = -1;
                add->kmap_ops[i].handle = -1;
                add->kunmap_ops[i].handle = -1;
        }

        add->index = 0;
        add->count = count;
        atomic_set(&add->users, 1);

        return add;

err:
        gntdev_free_map(add);
        return NULL;
}
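/*
 * Insert 'add' into priv->maps, which is kept sorted by index; the new map
 * is given the first free index range large enough to hold its pages.
 */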
static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (add->index + add->count < map->index) {
                        list_add_tail(&add->next, &map->next);
                        goto done;
                }
                add->index = map->index + map->count;
        }
        list_add_tail(&add->next, &priv->maps);

done:
        gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
                                               int index, int count)
{
        struct grant_map *map;

        list_for_each_entry(map, &priv->maps, next) {
                if (map->index != index)
                        continue;
                if (count && map->count != count)
                        continue;
                return map;
        }
        return NULL;
}

static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
{
        if (!map)
                return;

        if (!atomic_dec_and_test(&map->users))
                return;

        atomic_sub(map->count, &pages_mapped);

        if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(map->notify.event);
                evtchn_put(map->notify.event);
        }

        if (populate_freeable_maps && priv) {
                mutex_lock(&priv->lock);
                list_del(&map->next);
                mutex_unlock(&priv->lock);
        }

        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
}
/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, pgtable_t token,
                           unsigned long addr, void *data)
{
        struct grant_map *map = data;
        unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
        int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
        u64 pte_maddr;

        BUG_ON(pgnr >= map->count);
        pte_maddr = arbitrary_virt_to_machine(pte).maddr;

        /*
         * Set the PTE as special to force get_user_pages_fast() to fall
         * back to the slow path.  If this is not supported as part of
         * the grant map, it will be done afterwards.
         */
        if (xen_feature(XENFEAT_gnttab_map_avail_bits))
                flags |= (1 << _GNTMAP_guest_avail0);

        gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
                          map->grants[pgnr].ref,
                          map->grants[pgnr].domid);
        gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
                            -1 /* handle */);
        return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
                                     unsigned long addr, void *data)
{
        set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
        return 0;
}
#endif
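/*
 * map_grant_pages() issues the actual grant-map hypercalls for a grant_map.
 * Without use_ptemod the granted frames are mapped at the kernel addresses
 * of map->pages (and later handed to userspace via vm_insert_page()); with
 * use_ptemod the map_ops were already prepared against the user PTEs by
 * find_grant_ptes(), and an extra set of kmap_ops maps the same grants at
 * the kernel linear addresses as well.
 */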
static int map_grant_pages(struct grant_map *map)
{
        int i, err = 0;

        if (!use_ptemod) {
                /* Note: it could already be mapped */
                if (map->map_ops[0].handle != -1)
                        return 0;
                for (i = 0; i < map->count; i++) {
                        unsigned long addr = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
                                          map->grants[i].ref,
                                          map->grants[i].domid);
                        gnttab_set_unmap_op(&map->unmap_ops[i], addr,
                                            map->flags, -1 /* handle */);
                }
        } else {
                /*
                 * Set up the map_ops corresponding to the pte entries pointing
                 * to the kernel linear addresses of the struct pages.
                 * These ptes are completely different from the user ptes dealt
                 * with by find_grant_ptes().
                 */
                for (i = 0; i < map->count; i++) {
                        unsigned long address = (unsigned long)
                                pfn_to_kaddr(page_to_pfn(map->pages[i]));
                        BUG_ON(PageHighMem(map->pages[i]));

                        gnttab_set_map_op(&map->kmap_ops[i], address,
                                          map->flags | GNTMAP_host_map,
                                          map->grants[i].ref,
                                          map->grants[i].domid);
                        gnttab_set_unmap_op(&map->kunmap_ops[i], address,
                                            map->flags | GNTMAP_host_map, -1);
                }
        }

        pr_debug("map %d+%d\n", map->index, map->count);
        err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
                              map->pages, map->count);
        if (err)
                return err;

        for (i = 0; i < map->count; i++) {
                if (map->map_ops[i].status) {
                        err = -EINVAL;
                        continue;
                }

                map->unmap_ops[i].handle = map->map_ops[i].handle;
                if (use_ptemod)
                        map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
        }
        return err;
}
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int i, err = 0;
        struct gntab_unmap_queue_data unmap_data;

        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);

                if (pgno >= offset && pgno < offset + pages) {
                        /* No need for kmap, pages are in lowmem */
                        uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));

                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }

        unmap_data.unmap_ops = map->unmap_ops + offset;
        unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
        unmap_data.pages = map->pages + offset;
        unmap_data.count = pages;

        err = gnttab_unmap_refs_sync(&unmap_data);
        if (err)
                return err;

        for (i = 0; i < pages; i++) {
                if (map->unmap_ops[offset+i].status)
                        err = -EINVAL;
                pr_debug("unmap handle=%d st=%d\n",
                         map->unmap_ops[offset+i].handle,
                         map->unmap_ops[offset+i].status);
                map->unmap_ops[offset+i].handle = -1;
        }
        return err;
}
static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
        int range, err = 0;

        pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

        /* It is possible the requested range will have a "hole" where we
         * already unmapped some of the grants. Only unmap valid ranges.
         */
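        /* For example, if the four handles starting at 'offset' are
         * [h0, -1, h2, h3], two batches are issued: one for page 0 and
         * one for pages 2..3.
         */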
        while (pages && !err) {
                while (pages && map->unmap_ops[offset].handle == -1) {
                        offset++;
                        pages--;
                }
                range = 0;
                while (range < pages) {
                        if (map->unmap_ops[offset+range].handle == -1)
                                break;
                        range++;
                }
                err = __unmap_grant_pages(map, offset, range);
                offset += range;
                pages -= range;
        }

        return err;
}
/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;

        pr_debug("gntdev_vma_open %p\n", vma);
        atomic_inc(&map->users);
}
static void gntdev_vma_close(struct vm_area_struct *vma)
{
        struct grant_map *map = vma->vm_private_data;
        struct file *file = vma->vm_file;
        struct gntdev_priv *priv = file->private_data;

        pr_debug("gntdev_vma_close %p\n", vma);
        if (use_ptemod) {
                /* It is possible that an mmu notifier could be running
                 * concurrently, so take priv->lock to ensure that the vma
                 * won't vanish during the unmap_grant_pages call, since we
                 * will spin here until that completes. Such a concurrent
                 * call will not do any unmapping, since that has been done
                 * prior to closing the vma, but it may still iterate the
                 * unmap_ops list.
                 */
                mutex_lock(&priv->lock);
                map->vma = NULL;
                mutex_unlock(&priv->lock);
        }
        vma->vm_private_data = NULL;
        gntdev_put_map(priv, map);
}
static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
                                                 unsigned long addr)
{
        struct grant_map *map = vma->vm_private_data;

        return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
        .open = gntdev_vma_open,
        .close = gntdev_vma_close,
        .find_special_page = gntdev_vma_find_special_page,
};
/* ------------------------------------------------------------------ */

static void unmap_if_in_range(struct grant_map *map,
                              unsigned long start, unsigned long end)
{
        unsigned long mstart, mend;
        int err;

        if (!map->vma)
                return;
        if (map->vma->vm_start >= end)
                return;
        if (map->vma->vm_end <= start)
                return;

        mstart = max(start, map->vma->vm_start);
        mend = min(end, map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
                 map->index, map->count,
                 map->vma->vm_start, map->vma->vm_end,
                 start, end, mstart, mend);
        err = unmap_grant_pages(map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
}

static void mn_invl_range_start(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;

        mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                unmap_if_in_range(map, start, end);
        }
        mutex_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
                         struct mm_struct *mm,
                         unsigned long address)
{
        mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
                       struct mm_struct *mm)
{
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
        int err;

        mutex_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                         map->index, map->count,
                         map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
                if (!map->vma)
                        continue;
                pr_debug("map %d+%d (%lx %lx)\n",
                         map->index, map->count,
                         map->vma->vm_start, map->vma->vm_end);
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
        mutex_unlock(&priv->lock);
}

static const struct mmu_notifier_ops gntdev_mmu_ops = {
        .release = mn_release,
        .invalidate_page = mn_invl_page,
        .invalidate_range_start = mn_invl_range_start,
};
/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        INIT_LIST_HEAD(&priv->maps);
        INIT_LIST_HEAD(&priv->freeable_maps);
        mutex_init(&priv->lock);

        if (use_ptemod) {
                priv->mm = get_task_mm(current);
                if (!priv->mm) {
                        kfree(priv);
                        return -ENOMEM;
                }
                priv->mn.ops = &gntdev_mmu_ops;
                ret = mmu_notifier_register(&priv->mn, priv->mm);
                mmput(priv->mm);
        }

        if (ret) {
                kfree(priv);
                return ret;
        }

        flip->private_data = priv;
        pr_debug("priv %p\n", priv);

        return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
        struct gntdev_priv *priv = flip->private_data;
        struct grant_map *map;

        pr_debug("priv %p\n", priv);

        mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
        mutex_unlock(&priv->lock);

        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
        kfree(priv);
        return 0;
}
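/*
 * Typical user-space flow for the map ioctl below, a hedged sketch (error
 * handling omitted; remote_domid and gref come from the granting domain):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count = 1,
 *		.refs[0] = { .domid = remote_domid, .ref = gref },
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, op.index);
 *
 * op.index is the byte offset assigned below (map->index << PAGE_SHIFT),
 * which gntdev_mmap() later uses (as vma->vm_pgoff) to find the map again.
 */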
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
                                       struct ioctl_gntdev_map_grant_ref __user *u)
{
        struct ioctl_gntdev_map_grant_ref op;
        struct grant_map *map;
        int err;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, add %d\n", priv, op.count);
        if (unlikely(op.count <= 0))
                return -EINVAL;

        err = -ENOMEM;
        map = gntdev_alloc_map(priv, op.count);
        if (!map)
                return err;

        if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
                pr_debug("can't map: over limit\n");
                gntdev_put_map(NULL, map);
                return err;
        }

        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
                gntdev_put_map(NULL, map);
                return -EFAULT;
        }

        mutex_lock(&priv->lock);
        gntdev_add_map(priv, map);
        op.index = map->index << PAGE_SHIFT;
        mutex_unlock(&priv->lock);

        if (copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;

        return 0;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
                                         struct ioctl_gntdev_unmap_grant_ref __user *u)
{
        struct ioctl_gntdev_unmap_grant_ref op;
        struct grant_map *map;
        int err = -ENOENT;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
                if (populate_freeable_maps)
                        list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
        mutex_unlock(&priv->lock);
        if (map)
                gntdev_put_map(priv, map);
        return err;
}
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
                                              struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct grant_map *map;
        int rv = -EINVAL;

        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
                goto out_unlock;

        map = vma->vm_private_data;
        if (!map)
                goto out_unlock;

        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
        rv = 0;

out_unlock:
        up_read(&current->mm->mmap_sem);

        if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
        return rv;
}
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_unmap_notify op;
        struct grant_map *map;
        int rc;
        int out_flags;
        unsigned int out_event;

        if (copy_from_user(&op, u, sizeof(op)))
                return -EFAULT;

        if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
                return -EINVAL;

        /* We need to grab a reference to the event channel we are going to use
         * to send the notify before releasing the reference we may already have
         * (if someone has called this ioctl twice). This is required so that
         * it is possible to change the clear_byte part of the notification
         * without disturbing the event channel part, which may now be the last
         * reference to that event channel.
         */
        if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
                if (evtchn_get(op.event_channel_port))
                        return -EINVAL;
        }

        out_flags = op.action;
        out_event = op.event_channel_port;

        mutex_lock(&priv->lock);

        list_for_each_entry(map, &priv->maps, next) {
                uint64_t begin = map->index << PAGE_SHIFT;
                uint64_t end = (map->index + map->count) << PAGE_SHIFT;
                if (op.index >= begin && op.index < end)
                        goto found;
        }
        rc = -ENOENT;
        goto unlock_out;

found:
        if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
            (map->flags & GNTMAP_readonly)) {
                rc = -EINVAL;
                goto unlock_out;
        }

        out_flags = map->notify.flags;
        out_event = map->notify.event;

        map->notify.flags = op.action;
        map->notify.addr = op.index - (map->index << PAGE_SHIFT);
        map->notify.event = op.event_channel_port;

        rc = 0;

unlock_out:
        mutex_unlock(&priv->lock);

        /* Drop the reference to the event channel we did not save in the map */
        if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
                evtchn_put(out_event);

        return rc;
}
#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
        struct gnttab_copy ops[GNTDEV_COPY_BATCH];
        struct page *pages[GNTDEV_COPY_BATCH];
        s16 __user *status[GNTDEV_COPY_BATCH];
        unsigned int nr_ops;
        unsigned int nr_pages;
};
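/*
 * Copy operations are accumulated in a gntdev_copy_batch and flushed to the
 * hypervisor GNTDEV_COPY_BATCH ops at a time (see gntdev_copy()).  Each op
 * pins at most one local page, which is why local-to-local copies are
 * rejected in gntdev_grant_copy_seg().
 */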
static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
                           bool writeable, unsigned long *gfn)
{
        unsigned long addr = (unsigned long)virt;
        struct page *page;
        unsigned long xen_pfn;
        int ret;

        ret = get_user_pages_fast(addr, 1, writeable, &page);
        if (ret < 0)
                return ret;

        batch->pages[batch->nr_pages++] = page;

        xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
        *gfn = pfn_to_gfn(xen_pfn);

        return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        for (i = 0; i < batch->nr_pages; i++)
                put_page(batch->pages[i]);
        batch->nr_pages = 0;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
        unsigned int i;

        gnttab_batch_copy(batch->ops, batch->nr_ops);
        gntdev_put_pages(batch);

        /*
         * For each completed op, update the status if the op failed
         * and all previous ops for the segment were successful.
         */
        for (i = 0; i < batch->nr_ops; i++) {
                s16 status = batch->ops[i].status;
                s16 old_status;

                if (status == GNTST_okay)
                        continue;

                if (__get_user(old_status, batch->status[i]))
                        return -EFAULT;

                if (old_status != GNTST_okay)
                        continue;

                if (__put_user(status, batch->status[i]))
                        return -EFAULT;
        }

        batch->nr_ops = 0;
        return 0;
}
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
                                 struct gntdev_grant_copy_segment *seg,
                                 s16 __user *status)
{
        uint16_t copied = 0;

        /*
         * Disallow local -> local copies since there is only space in
         * batch->pages for one page per-op and this would be a very
         * expensive memcpy().
         */
        if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
                return -EINVAL;

        /* Can't cross page if source/dest is a grant ref. */
        if (seg->flags & GNTCOPY_source_gref) {
                if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }
        if (seg->flags & GNTCOPY_dest_gref) {
                if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
                        return -EINVAL;
        }

        if (put_user(GNTST_okay, status))
                return -EFAULT;

        while (copied < seg->len) {
                struct gnttab_copy *op;
                void __user *virt;
                size_t len, off;
                unsigned long gfn;
                int ret;

                if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
                        ret = gntdev_copy(batch);
                        if (ret < 0)
                                return ret;
                }

                len = seg->len - copied;

                op = &batch->ops[batch->nr_ops];
                op->flags = 0;

                if (seg->flags & GNTCOPY_source_gref) {
                        op->source.u.ref = seg->source.foreign.ref;
                        op->source.domid = seg->source.foreign.domid;
                        op->source.offset = seg->source.foreign.offset + copied;
                        op->flags |= GNTCOPY_source_gref;
                } else {
                        virt = seg->source.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, false, &gfn);
                        if (ret < 0)
                                return ret;

                        op->source.u.gmfn = gfn;
                        op->source.domid = DOMID_SELF;
                        op->source.offset = off;
                }

                if (seg->flags & GNTCOPY_dest_gref) {
                        op->dest.u.ref = seg->dest.foreign.ref;
                        op->dest.domid = seg->dest.foreign.domid;
                        op->dest.offset = seg->dest.foreign.offset + copied;
                        op->flags |= GNTCOPY_dest_gref;
                } else {
                        virt = seg->dest.virt + copied;
                        off = (unsigned long)virt & ~XEN_PAGE_MASK;
                        len = min(len, (size_t)XEN_PAGE_SIZE - off);

                        ret = gntdev_get_page(batch, virt, true, &gfn);
                        if (ret < 0)
                                return ret;

                        op->dest.u.gmfn = gfn;
                        op->dest.domid = DOMID_SELF;
                        op->dest.offset = off;
                }

                op->len = len;
                copied += len;

                batch->status[batch->nr_ops] = status;
                batch->nr_ops++;
        }

        return 0;
}
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
        struct ioctl_gntdev_grant_copy copy;
        struct gntdev_copy_batch batch;
        unsigned int i;
        int ret = 0;

        if (copy_from_user(&copy, u, sizeof(copy)))
                return -EFAULT;

        batch.nr_ops = 0;
        batch.nr_pages = 0;

        for (i = 0; i < copy.count; i++) {
                struct gntdev_grant_copy_segment seg;

                if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
                        ret = -EFAULT;
                        goto out;
                }

                ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
                if (ret < 0)
                        goto out;

                cond_resched();
        }
        if (batch.nr_ops)
                ret = gntdev_copy(&batch);
        return ret;

out:
        gntdev_put_pages(&batch);
        return ret;
}
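/*
 * Example user-space use of the copy ioctl, a hedged sketch (gref and
 * remote_domid are illustrative, error handling omitted; per-segment
 * results come back in seg.status):
 *
 *	char buf[4096];
 *	struct gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.len = sizeof(buf),
 *		.source.foreign = { .ref = gref, .domid = remote_domid,
 *				    .offset = 0 },
 *		.dest.virt = buf,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 */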
static long gntdev_ioctl(struct file *flip,
                         unsigned int cmd, unsigned long arg)
{
        struct gntdev_priv *priv = flip->private_data;
        void __user *ptr = (void __user *)arg;

        switch (cmd) {
        case IOCTL_GNTDEV_MAP_GRANT_REF:
                return gntdev_ioctl_map_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_UNMAP_GRANT_REF:
                return gntdev_ioctl_unmap_grant_ref(priv, ptr);

        case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
                return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

        case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
                return gntdev_ioctl_notify(priv, ptr);

        case IOCTL_GNTDEV_GRANT_COPY:
                return gntdev_ioctl_grant_copy(priv, ptr);

        default:
                pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
                return -ENOIOCTLCMD;
        }

        return 0;
}
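/*
 * gntdev_mmap() completes a mapping started with IOCTL_GNTDEV_MAP_GRANT_REF:
 * vma->vm_pgoff selects the grant_map, then either the granted frames are
 * mapped directly at the user PTEs (use_ptemod, via find_grant_ptes() and
 * map_grant_pages()) or the already-mapped kernel pages are inserted into
 * the VMA with vm_insert_page().
 */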
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
        struct gntdev_priv *priv = flip->private_data;
        int index = vma->vm_pgoff;
        int count = vma_pages(vma);
        struct grant_map *map;
        int i, err = -EINVAL;

        if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        pr_debug("map %d+%d at %lx (pgoff %lx)\n",
                 index, count, vma->vm_start, vma->vm_pgoff);

        mutex_lock(&priv->lock);
        map = gntdev_find_map_index(priv, index, count);
        if (!map)
                goto unlock_out;
        if (use_ptemod && map->vma)
                goto unlock_out;
        if (use_ptemod && priv->mm != vma->vm_mm) {
                pr_warn("Huh? Other mm?\n");
                goto unlock_out;
        }

        atomic_inc(&map->users);

        vma->vm_ops = &gntdev_vmops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;

        vma->vm_private_data = map;

        if (use_ptemod)
                map->vma = vma;

        if (map->flags) {
                if ((vma->vm_flags & VM_WRITE) &&
                    (map->flags & GNTMAP_readonly))
                        goto out_unlock_put;
        } else {
                map->flags = GNTMAP_host_map;
                if (!(vma->vm_flags & VM_WRITE))
                        map->flags |= GNTMAP_readonly;
        }

        mutex_unlock(&priv->lock);

        if (use_ptemod) {
                err = apply_to_page_range(vma->vm_mm, vma->vm_start,
                                          vma->vm_end - vma->vm_start,
                                          find_grant_ptes, map);
                if (err) {
                        pr_warn("find_grant_ptes() failure.\n");
                        goto out_put_map;
                }
        }

        err = map_grant_pages(map);
        if (err)
                goto out_put_map;

        if (!use_ptemod) {
                for (i = 0; i < count; i++) {
                        err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
                                             map->pages[i]);
                        if (err)
                                goto out_put_map;
                }
        } else {
#ifdef CONFIG_X86
                /*
                 * If the PTEs were not made special by the grant map
                 * hypercall, do so here.
                 *
                 * This is racy since the mapping is already visible
                 * to userspace but userspace should be well-behaved
                 * enough to not touch it until the mmap() call
                 * returns.
                 */
                if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
                        apply_to_page_range(vma->vm_mm, vma->vm_start,
                                            vma->vm_end - vma->vm_start,
                                            set_grant_ptes_as_special, NULL);
                }
#endif
                map->pages_vm_start = vma->vm_start;
        }

        return 0;

unlock_out:
        mutex_unlock(&priv->lock);
        return err;

out_unlock_put:
        mutex_unlock(&priv->lock);
out_put_map:
        if (use_ptemod)
                map->vma = NULL;
        gntdev_put_map(priv, map);
        return err;
}
static const struct file_operations gntdev_fops = {
        .owner = THIS_MODULE,
        .open = gntdev_open,
        .release = gntdev_release,
        .mmap = gntdev_mmap,
        .unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "xen/gntdev",
        .fops  = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
        int err;

        if (!xen_domain())
                return -ENODEV;

        use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

        err = misc_register(&gntdev_miscdev);
        if (err != 0) {
                pr_err("Could not register gntdev device\n");
                return err;
        }
        return 0;
}

static void __exit gntdev_exit(void)
{
        misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */