/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
 *
 * Copyright (C) 2006-2016 Oracle Corporation
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"

/* Get the pointer to the first parameter of a HGCM call request. */
#define VMMDEV_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))

/* The max parameter buffer size for a user request. */
#define VBG_MAX_HGCM_USER_PARM		(24 * SZ_1M)
/* The max parameter buffer size for a kernel request. */
#define VBG_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)

#define VBG_DEBUG_PORT			0x504

/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
static char vbg_log_buf[128];

#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...)						\
{									\
	unsigned long flags;						\
	va_list args;							\
	int i, count;							\
									\
	va_start(args, fmt);						\
	spin_lock_irqsave(&vbg_log_lock, flags);			\
									\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++)					\
		outb(vbg_log_buf[i], VBG_DEBUG_PORT);			\
									\
	pr_func("%s", vbg_log_buf);					\
									\
	spin_unlock_irqrestore(&vbg_log_lock, flags);			\
	va_end(args);							\
}									\
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif

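/*
 * Usage note (illustrative): the helpers generated above take printk-style
 * format strings; each message is written both to the kernel log and, byte
 * by byte, to the hypervisor debug port, e.g.:
 *
 *	vbg_info("vboxguest: host version: %s\n", host_version);
 *	vbg_err("%s: Unsupported VMMDev version\n", __func__);
 *
 * (host_version is a made-up variable, shown only for the format string.)
 */
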
void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
{
	struct vmmdev_request_header *req;
	int order = get_order(PAGE_ALIGN(len));

	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
	if (!req)
		return NULL;

	memset(req, 0xaa, len);

	req->size = len;
	req->version = VMMDEV_REQUEST_HEADER_VERSION;
	req->request_type = req_type;
	req->rc = VERR_GENERAL_FAILURE;
	req->reserved1 = 0;
	req->reserved2 = 0;

	return req;
}

void vbg_req_free(void *req, size_t len)
{
	if (!req)
		return;

	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
}

/* Note this function returns a VBox status code, not a negative errno!! */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}

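/*
 * A minimal usage sketch (illustrative; "vmmdev_foo" stands in for any
 * concrete request type): callers allocate a request with vbg_req_alloc(),
 * fill in the type-specific fields, submit it with vbg_req_perform() and,
 * if needed, translate the returned VBox status code:
 *
 *	struct vmmdev_foo *req;
 *	int rc;
 *
 *	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_FOO);
 *	if (!req)
 *		return -ENOMEM;
 *	... fill in the request-specific fields of *req ...
 *	rc = vbg_req_perform(gdev, req);
 *	vbg_req_free(req, sizeof(*req));
 *	return vbg_status_code_to_errno(rc);
 */
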
static bool hgcm_req_done(struct vbg_dev *gdev,
			  struct vmmdev_hgcmreq_header *header)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	done = header->flags & VMMDEV_HGCM_REQ_DONE;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return done;
}

int vbg_hgcm_connect(struct vbg_dev *gdev,
		     struct vmmdev_hgcm_service_location *loc,
		     u32 *client_id, int *vbox_status)
{
	struct vmmdev_hgcm_connect *hgcm_connect = NULL;
	int rc;

	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
				     VMMDEVREQ_HGCM_CONNECT);
	if (!hgcm_connect)
		return -ENOMEM;

	hgcm_connect->header.flags = 0;
	memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
	hgcm_connect->client_id = 0;

	rc = vbg_req_perform(gdev, hgcm_connect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_connect->header));

	if (rc >= 0) {
		*client_id = hgcm_connect->client_id;
		rc = hgcm_connect->header.result;
	}

	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_connect);

int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
{
	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
	int rc;

	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
					VMMDEVREQ_HGCM_DISCONNECT);
	if (!hgcm_disconnect)
		return -ENOMEM;

	hgcm_disconnect->header.flags = 0;
	hgcm_disconnect->client_id = client_id;

	rc = vbg_req_perform(gdev, hgcm_disconnect);

	if (rc == VINF_HGCM_ASYNC_EXECUTE)
		wait_event(gdev->hgcm_wq,
			   hgcm_req_done(gdev, &hgcm_disconnect->header));

	if (rc >= 0)
		rc = hgcm_disconnect->header.result;

	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));

	*vbox_status = rc;
	return 0;
}
EXPORT_SYMBOL(vbg_hgcm_disconnect);

static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
{
	u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));

	return size >> PAGE_SHIFT;
}

static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
{
	u32 page_count;

	page_count = hgcm_call_buf_size_in_pages(buf, len);
	*extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

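/*
 * Worked example (assuming 4 KiB pages): a buffer that starts 0x300 bytes
 * into a page with len = 8192 covers 0x300 + 8192 = 8960 bytes counted from
 * the start of its first page, which PAGE_ALIGN rounds up to 12288, i.e. a
 * 3-page page list; the extra request space reserved for it is then
 * offsetof(struct vmmdev_hgcm_pagelist, pages[3]).
 */
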
static int hgcm_call_preprocess_linaddr(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	void **bounce_buf_ret, size_t *extra)
{
	void *buf, *bounce_buf;
	bool copy_in;
	u32 len;
	int ret;

	buf = (void *)src_parm->u.pointer.u.linear_addr;
	len = src_parm->u.pointer.size;
	copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;

	if (len > VBG_MAX_HGCM_USER_PARM)
		return -E2BIG;

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	/*
	 * Store the bounce buffer before the copy, so that the caller's
	 * cleanup path can free it even when copy_from_user() fails.
	 */
	*bounce_buf_ret = bounce_buf;

	if (copy_in) {
		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
		if (ret)
			return -EFAULT;
	} else {
		memset(bounce_buf, 0, len);
	}

	hgcm_call_add_pagelist_size(bounce_buf, len, extra);
	return 0;
}

/**
 * Preprocesses the HGCM call: validates parameters, allocates bounce buffers
 * and figures out how much extra storage is needed for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:         Pointer to source function call parameters.
 * @parm_count:       Number of function call parameters.
 * @bounce_bufs_ret:  Where to return the allocated bounce-buffer array.
 * @extra:            Where to return the extra request space needed for
 *                    physical page lists.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * Translates linear address types to page list direction flags.
 *
 * Return: page list flags.
 * @type:  The type.
 */
static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
	enum vmmdev_hgcm_function_parameter_type type)
{
	switch (type) {
	default:
		WARN_ON(1);
		/* Fall through */
	case VMMDEV_HGCM_PARM_TYPE_LINADDR:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;

	case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
	case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
		return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
	}
}

static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}

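/*
 * Layout sketch of the finished call request (illustrative): the fixed
 * vmmdev_hgcm_call header is followed by parm_count function parameters;
 * any linear-address parameter is rewritten as a PAGELIST entry whose
 * u.page_list.offset points into the "extra" area appended after the
 * parameter array:
 *
 *	[vmmdev_hgcm_call][parm 0 .. parm N-1][pagelist for parm i]...
 */
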
/**
 * Initializes the call request that we're sending to the host.
 * @call:         The call to initialize.
 * @client_id:    The client ID of the caller.
 * @function:     The function number of the function to call.
 * @src_parm:     Pointer to source function call parameters.
 * @parm_count:   Number of function call parameters.
 * @bounce_bufs:  The bouncebuffer array.
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;

			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}

/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}

/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:        The VBoxGuest device extension.
 * @call:        The call to execute.
 * @timeout_ms:  Timeout in ms.
 * @leak_it:     Where to return the leak it / free it indicator. Set when a
 *               timed-out call could not be cancelled and may still be
 *               written to by the host, so the request must be leaked
 *               rather than freed.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/* We really should never get here */
		vbg_err("%s: Call timed out and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}

/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:         HGCM call request.
 * @dst_parm:     Pointer to function call parameters destination.
 * @parm_count:   Number of function call parameters.
 * @bounce_bufs:  The bouncebuffer array.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}

int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
		  u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		   parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	if (!leak_it)
		vbg_req_free(call, size);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);

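/*
 * A minimal usage sketch for the HGCM helpers above (illustrative only; the
 * service-location setup and the function/parameter numbers depend entirely
 * on the service being called):
 *
 *	u32 client_id;
 *	int vbox_status, ret;
 *
 *	ret = vbg_hgcm_connect(gdev, &loc, &client_id, &vbox_status);
 *	if (ret < 0 || vbox_status < 0)
 *		goto err;
 *
 *	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
 *			    parms, parm_count, &vbox_status);
 *
 *	vbg_hgcm_disconnect(gdev, client_id, &vbox_status);
 *
 * Note that these helpers return 0 even when the host reports an error;
 * callers must check *vbox_status (a VBox status code) separately.
 */
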
#ifdef CONFIG_COMPAT
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
	int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/*
	 * Copy back. Index with i only; advancing parm32/parm64 as well
	 * would skip every other entry and leave parm64 pointing past its
	 * allocation when it is freed below.
	 */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif

static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED] = -EPERM,
	[-VERR_FILE_NOT_FOUND] = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND] = -ESRCH,
	[-VERR_INTERRUPTED] = -EINTR,
	[-VERR_DEV_IO_ERROR] = -EIO,
	[-VERR_TOO_MUCH_DATA] = -E2BIG,
	[-VERR_BAD_EXE_FORMAT] = -ENOEXEC,
	[-VERR_INVALID_HANDLE] = -EBADF,
	[-VERR_TRY_AGAIN] = -EAGAIN,
	[-VERR_NO_MEMORY] = -ENOMEM,
	[-VERR_INVALID_POINTER] = -EFAULT,
	[-VERR_RESOURCE_BUSY] = -EBUSY,
	[-VERR_ALREADY_EXISTS] = -EEXIST,
	[-VERR_NOT_SAME_DEVICE] = -EXDEV,
	[-VERR_NOT_A_DIRECTORY] = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND] = -ENOTDIR,
	[-VERR_INVALID_NAME] = -ENOENT,
	[-VERR_IS_A_DIRECTORY] = -EISDIR,
	[-VERR_INVALID_PARAMETER] = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES] = -ENFILE,
	[-VERR_INVALID_FUNCTION] = -ENOTTY,
	[-VERR_SHARING_VIOLATION] = -ETXTBSY,
	[-VERR_FILE_TOO_BIG] = -EFBIG,
	[-VERR_DISK_FULL] = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE] = -ESPIPE,
	[-VERR_WRITE_PROTECT] = -EROFS,
	[-VERR_BROKEN_PIPE] = -EPIPE,
	[-VERR_DEADLOCK] = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG] = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED] = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED] = -ENOSYS,
	[-VERR_NOT_SUPPORTED] = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY] = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS] = -ELOOP,
	[-VERR_NO_MORE_FILES] = -ENODATA,
	[-VERR_NO_DATA] = -ENODATA,
	[-VERR_NET_NO_NETWORK] = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME] = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION] = -EILSEQ,
	[-VERR_NET_NOT_SOCKET] = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED] = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE] = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE] = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE] = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED] = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED] = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED] = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED] = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED] = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE] = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE] = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN] = -ENETDOWN,
	[-VERR_NET_UNREACHABLE] = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET] = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED] = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER] = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE] = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED] = -EISCONN,
	[-VERR_NET_NOT_CONNECTED] = -ENOTCONN,
	[-VERR_NET_SHUTDOWN] = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES] = -ETOOMANYREFS,
	[-VERR_TIMEOUT] = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED] = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN] = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE] = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS] = -EALREADY,
	[-VERR_NET_IN_PROGRESS] = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT] = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED] = -EMEDIUMTYPE,
};

int vbg_status_code_to_errno(int rc)
{
	if (rc >= 0)
		return 0;

	rc = -rc;
	if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
	    vbg_status_code_to_errno_table[rc] == 0) {
		vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
		return -EPROTO;
	}

	return vbg_status_code_to_errno_table[rc];
}
EXPORT_SYMBOL(vbg_status_code_to_errno);
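
/*
 * Typical use (illustrative): convert the host-reported status to an errno
 * only at the point where it is returned to user space; zero and positive
 * (VINF_*) codes map to 0:
 *
 *	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
 *			    parms, parm_count, &vbox_status);
 *	if (ret == 0 && vbox_status < 0)
 *		ret = vbg_status_code_to_errno(vbox_status);
 */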