/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}
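
/*
 * Illustrative user-space sketch (not part of this file; "fd" is assumed
 * to be an open descriptor on /dev/xen/privcmd): the ioctl forwards the
 * hypercall number and all five arguments verbatim, which is why a
 * restricted descriptor refuses it outright above.
 *
 *	struct privcmd_hypercall call = {
 *		.op = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	long ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */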

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}
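
/*
 * Worked example of the chunking above: with size == sizeof(xen_pfn_t)
 * (8 bytes on 64-bit) and PAGE_SIZE == 4096, each page in the list holds
 * 512 elements, so a 2000-element batch occupies four pages with the last
 * page only partially used. An element never straddles a page boundary
 * because a fresh page is started whenever pageidx > PAGE_SIZE - size.
 */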

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;

		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}
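
/*
 * Contrast with traverse_pages(): there fn sees one element per call,
 * here fn sees up to PAGE_SIZE/size elements at once (512 for 8-byte
 * xen_pfn_t entries under a 4096-byte page), which is what lets
 * mmap_batch_fn() below remap a whole page worth of frames with a single
 * xen_remap_domain_gfn_array() call.
 */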

struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}
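
/*
 * IOCTL_PRIVCMD_MMAP is the legacy interface: each privcmd_mmap_entry
 * describes a (va, mfn, npages) run, and mmap_gfn_range() above insists
 * the runs tile the VMA contiguously. The MMAPBATCH variants below
 * supersede it with per-frame arrays and per-frame error reporting.
 */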

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE * nr;
	st->index += nr;

	return 0;
}
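
/*
 * Note the (int *)gfnp cast above: xen_remap_domain_gfn_array() writes
 * per-frame status codes back over the very buffer that supplied the
 * frame numbers, so after this pass the gathered pagelist contains an
 * error array instead of gfns. mmap_return_errors() depends on this
 * in-place reuse when the list is walked a second time.
 */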

static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}
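
/*
 * Worked example of the V1 encoding (values from xen/privcmd.h, where
 * PRIVCMD_MMAPBATCH_PAGED_ERROR is 0x80000000U and
 * PRIVCMD_MMAPBATCH_MFN_ERROR is 0xf0000000U): a frame 0x1234 that failed
 * with -ENOENT is handed back as 0x80001234, so a V1 caller has to mask
 * off the top nibble to recover the gfn and cannot use frames that large.
 * V2 sidesteps this by writing plain errno values into the separate
 * m.err array.
 */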

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}
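
/*
 * Background note: on auto-translated guests there is no direct MFN
 * remapping, so alloc_xenballooned_pages() supplies local frames whose
 * physmap slots can then be backed with the foreign pages; once the VMA
 * goes away, privcmd_close() unmaps them and returns the pages to the
 * balloon.
 */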

static const struct vm_operations_struct privcmd_vm_ops;

static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.index = 0;
	state.global_error = 0;
	state.version = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors,
					   &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	up_write(&mm->mmap_sem);
	goto out;
}

static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		unsigned int requested;
		int pinned;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
		if (requested > nr_pages)
			return -ENOSPC;

		pinned = get_user_pages_fast(
			(unsigned long) kbufs[i].uptr,
			requested, FOLL_WRITE, pages);
		if (pinned < 0)
			return pinned;

		nr_pages -= pinned;
		pages += pinned;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unsigned int i;

	if (!pages)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			put_page(pages[i]);
	}
}
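
/*
 * Bookkeeping note: lock_pages() can fail part way through with some
 * entries of pages[] already pinned. unlock_pages() therefore tolerates
 * NULL holes (the array comes from kcalloc() in privcmd_ioctl_dm_op())
 * and drops one reference for every slot that was actually populated.
 */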

static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(VERIFY_WRITE, kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
	if (rc)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, nr_pages);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}
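
/*
 * Illustrative user-space sketch (assumptions, not part of this file:
 * "fd", "domid", "op_buf" and "op_size" are the caller's): a device model
 * wraps its dm_op payload in privcmd_dm_op_buf entries, which this ioctl
 * pins with lock_pages() and forwards as xen_dm_op_buf handles.
 *
 *	struct privcmd_dm_op_buf buf = { .uptr = op_buf, .size = op_size };
 *	struct privcmd_dm_op dm = { .dom = domid, .num = 1, .ubufs = &buf };
 *	int rc = ioctl(fd, IOCTL_PRIVCMD_DM_OP, &dm);
 */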

static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}
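
/*
 * Example of the restriction model: after
 *
 *	domid_t domid = 5;
 *	ioctl(fd, IOCTL_PRIVCMD_RESTRICT, &domid);
 *
 * every later mmap/mmapbatch/dm_op on this descriptor must name domain 5,
 * arbitrary hypercalls are refused, and the restriction cannot be undone:
 * privcmd_open() starts each descriptor at DOMID_INVALID (unrestricted)
 * and this is the only place the domid is ever set.
 */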

static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		free_xenballooned_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kfree(pages);
}

static int privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}
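
/*
 * Typical user-space flow (illustrative sketch; "gfns" and "errs" are the
 * caller's arrays): reserve address space by mmap()ing this device, then
 * populate it with the batch ioctl:
 *
 *	void *addr = mmap(NULL, nr * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch_v2 m = {
 *		.num = nr, .dom = domid,
 *		.addr = (__u64)(uintptr_t)addr, .arr = gfns, .err = errs,
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &m);
 *
 * privcmd_fault() above deliberately SIGBUSes accesses to any part of the
 * VMA that no ioctl ever populated.
 */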

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{
	return pte_none(*pte) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);