stub_rx.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <asm/byteorder.h>
#include <linux/kthread.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

#include "usbip_common.h"
#include "stub.h"
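
/*
 * The is_*_cmd() helpers below inspect the setup packet of a control URB to
 * detect standard requests that the stub must handle specially before
 * forwarding anything to the device.
 */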
static int is_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
	       (req->wValue == USB_ENDPOINT_HALT);
}

static int is_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
	       (req->bRequestType == USB_RECIP_INTERFACE);
}

static int is_set_configuration_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
	       (req->bRequestType == USB_RECIP_DEVICE);
}

static int is_reset_device_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 value;
	__u16 index;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	value = le16_to_cpu(req->wValue);
	index = le16_to_cpu(req->wIndex);

	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
	    (req->bRequestType == USB_RT_PORT) &&
	    (value == USB_PORT_FEAT_RESET)) {
		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
		return 1;
	} else
		return 0;
}
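
/*
 * The tweak_*_cmd() helpers below carry out an intercepted request on the
 * server side through the corresponding usb core call, instead of passing
 * the raw control transfer straight through to the device.
 */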
static int tweak_clear_halt_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	int target_endp;
	int target_dir;
	int target_pipe;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;

	/*
	 * The stalled endpoint is specified in the wIndex value. The endpoint
	 * of the urb is the target of this clear_halt request (i.e., control
	 * endpoint).
	 */
	target_endp = le16_to_cpu(req->wIndex) & 0x000f;

	/* is the stalled endpoint direction IN or OUT? USB_DIR_IN is 0x80. */
	target_dir = le16_to_cpu(req->wIndex) & 0x0080;

	if (target_dir)
		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
	else
		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);

	ret = usb_clear_halt(urb->dev, target_pipe);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_clear_halt error: devnum %d endp %d ret %d\n",
			urb->dev->devnum, target_endp, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_clear_halt done: devnum %d endp %d\n",
			 urb->dev->devnum, target_endp);

	return ret;
}

static int tweak_set_interface_cmd(struct urb *urb)
{
	struct usb_ctrlrequest *req;
	__u16 alternate;
	__u16 interface;
	int ret;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	alternate = le16_to_cpu(req->wValue);
	interface = le16_to_cpu(req->wIndex);

	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
			  interface, alternate);

	ret = usb_set_interface(urb->dev, interface, alternate);
	if (ret < 0)
		dev_err(&urb->dev->dev,
			"usb_set_interface error: inf %u alt %u ret %d\n",
			interface, alternate, ret);
	else
		dev_info(&urb->dev->dev,
			 "usb_set_interface done: inf %u alt %u\n",
			 interface, alternate);

	return ret;
}

static int tweak_set_configuration_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	struct usb_ctrlrequest *req;
	__u16 config;
	int err;

	req = (struct usb_ctrlrequest *) urb->setup_packet;
	config = le16_to_cpu(req->wValue);

	err = usb_set_configuration(sdev->udev, config);
	if (err && err != -ENODEV)
		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
			config, err);
	return 0;
}
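
/*
 * A SET_FEATURE(PORT_RESET) request aimed at the port the device sits on is
 * translated into a usb_reset_device() of the exported device itself, done
 * under usb_lock_device_for_reset().
 */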
static int tweak_reset_device_cmd(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;

	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");

	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
		return 0;
	}
	usb_reset_device(sdev->udev);
	usb_unlock_device(sdev->udev);

	return 0;
}

/*
 * clear_halt, set_interface, set_configuration, and reset_device require
 * special tricks.
 */
static void tweak_special_requests(struct urb *urb)
{
	if (!urb || !urb->setup_packet)
		return;

	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
		return;

	if (is_clear_halt_cmd(urb))
		/* tweak clear_halt */
		tweak_clear_halt_cmd(urb);
	else if (is_set_interface_cmd(urb))
		/* tweak set_interface */
		tweak_set_interface_cmd(urb);
	else if (is_set_configuration_cmd(urb))
		/* tweak set_configuration */
		tweak_set_configuration_cmd(urb);
	else if (is_reset_device_cmd(urb))
		tweak_reset_device_cmd(urb);
	else
		usbip_dbg_stub_rx("no need to tweak\n");
}

/*
 * stub_recv_cmd_unlink() unlinks the URB by a call to usb_unlink_urb().
 * By unlinking the urb asynchronously, stub_rx can continuously
 * process incoming urbs. Even if the urb is unlinked, its completion
 * handler will be called and stub_tx will send a return pdu.
 *
 * See also the comments about the unlinking strategy in vhci_hcd.c.
 */
static int stub_recv_cmd_unlink(struct stub_device *sdev,
				struct usbip_header *pdu)
{
	int ret;
	unsigned long flags;
	struct stub_priv *priv;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry(priv, &sdev->priv_init, list) {
		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
			continue;

		/*
		 * This matched urb is not completed yet (i.e., it is still in
		 * flight in the usb hcd hardware/driver), so we are now
		 * cancelling it. The unlinking flag means that we are not
		 * going to return the normal result pdu of the submission
		 * request, but the result pdu of the unlink request.
		 */
		priv->unlinking = 1;

		/*
		 * When the unlinking flag is set, priv->seqnum is changed
		 * from the seqnum of the cancelled urb to the seqnum of the
		 * unlink request. This will be used to build the result pdu
		 * of the unlink request.
		 */
		priv->seqnum = pdu->base.seqnum;

		spin_unlock_irqrestore(&sdev->priv_lock, flags);

		/*
		 * usb_unlink_urb() is called outside the spinlock to avoid
		 * spinlock recursion, since stub_complete() is sometimes
		 * called in this context rather than in interrupt context.
		 * If stub_complete() runs before we call usb_unlink_urb(),
		 * usb_unlink_urb() will return an error value. In that case,
		 * stub_tx still returns the result pdu of this unlink
		 * request, even though the submission already completed and
		 * no actual unlinking took place.
		 *
		 * In that case, urb->status is not -ECONNRESET, so a driver
		 * on the client host can detect that the unlink request did
		 * not take effect.
		 */
		ret = usb_unlink_urb(priv->urb);
		if (ret != -EINPROGRESS)
			dev_err(&priv->urb->dev->dev,
				"failed to unlink a urb # %lu, ret %d\n",
				priv->seqnum, ret);

		return 0;
	}

	usbip_dbg_stub_rx("seqnum %d is not pending\n",
			  pdu->u.cmd_unlink.seqnum);

	/*
	 * The urb of the unlink target was not found in the priv_init queue.
	 * It has already completed and its result was (or is being) sent by a
	 * CMD_RET pdu, so usb_unlink_urb() is not needed. We only report the
	 * completion of this unlink request back to vhci_hcd.
	 */
	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return 0;
}
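
/*
 * A received pdu is only valid if it is addressed to the exported device
 * (matching devid) and the device is currently in the SDEV_ST_USED state.
 */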
static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usbip_device *ud = &sdev->ud;
	int valid = 0;

	if (pdu->base.devid == sdev->devid) {
		spin_lock_irq(&ud->lock);
		if (ud->status == SDEV_ST_USED) {
			/* A request is valid. */
			valid = 1;
		}
		spin_unlock_irq(&ud->lock);
	}

	return valid;
}
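
/*
 * Allocate a stub_priv for the incoming request and link it onto
 * sdev->priv_init under priv_lock. On allocation failure, an
 * SDEV_EVENT_ERROR_MALLOC event is raised and NULL is returned.
 */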
static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
					 struct usbip_header *pdu)
{
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	unsigned long flags;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
	if (!priv) {
		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return NULL;
	}

	priv->seqnum = pdu->base.seqnum;
	priv->sdev = sdev;

	/*
	 * After a stub_priv is linked to a list_head,
	 * our error handler can free allocated data.
	 */
	list_add_tail(&priv->list, &sdev->priv_init);

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return priv;
}
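
/*
 * Translate the endpoint number and direction from the pdu header into a
 * usb pipe value for the real device, validating the endpoint and, for
 * isochronous transfers, the number of packets. Returns -1 on error.
 */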
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
{
	struct usb_device *udev = sdev->udev;
	struct usb_host_endpoint *ep;
	struct usb_endpoint_descriptor *epd = NULL;
	int epnum = pdu->base.ep;
	int dir = pdu->base.direction;

	if (epnum < 0 || epnum > 15)
		goto err_ret;

	if (dir == USBIP_DIR_IN)
		ep = udev->ep_in[epnum & 0x7f];
	else
		ep = udev->ep_out[epnum & 0x7f];
	if (!ep)
		goto err_ret;

	epd = &ep->desc;

	if (usb_endpoint_xfer_control(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndctrlpipe(udev, epnum);
		else
			return usb_rcvctrlpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_bulk(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndbulkpipe(udev, epnum);
		else
			return usb_rcvbulkpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_int(epd)) {
		if (dir == USBIP_DIR_OUT)
			return usb_sndintpipe(udev, epnum);
		else
			return usb_rcvintpipe(udev, epnum);
	}

	if (usb_endpoint_xfer_isoc(epd)) {
		/* validate number of packets */
		if (pdu->u.cmd_submit.number_of_packets < 0 ||
		    pdu->u.cmd_submit.number_of_packets >
		    USBIP_MAX_ISO_PACKETS) {
			dev_err(&sdev->udev->dev,
				"CMD_SUBMIT: isoc invalid num packets %d\n",
				pdu->u.cmd_submit.number_of_packets);
			return -1;
		}
		if (dir == USBIP_DIR_OUT)
			return usb_sndisocpipe(udev, epnum);
		else
			return usb_rcvisocpipe(udev, epnum);
	}

err_ret:
	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
	return -1;
}
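
/*
 * Mask the transfer_flags received from the client down to the set that is
 * valid for this endpoint type and direction, similar to the flag checks
 * the usb core applies at submission time.
 */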
static void masking_bogus_flags(struct urb *urb)
{
	int xfertype;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || urb->hcpriv || !urb->complete)
		return;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return;

	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
		[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return;

	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
		   URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	default:	/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;
}
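
/*
 * Handle a CMD_SUBMIT pdu: allocate a stub_priv and an urb, receive the
 * transfer buffer and iso descriptors from the socket, apply the special
 * request tweaks and flag masking, then submit the urb to the device.
 */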
static void stub_recv_cmd_submit(struct stub_device *sdev,
				 struct usbip_header *pdu)
{
	int ret;
	struct stub_priv *priv;
	struct usbip_device *ud = &sdev->ud;
	struct usb_device *udev = sdev->udev;
	int pipe = get_pipe(sdev, pdu);

	if (pipe == -1)
		return;

	priv = stub_priv_alloc(sdev, pdu);
	if (!priv)
		return;

	/* setup a urb */
	if (usb_pipeisoc(pipe))
		priv->urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
					  GFP_KERNEL);
	else
		priv->urb = usb_alloc_urb(0, GFP_KERNEL);

	if (!priv->urb) {
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* allocate urb transfer buffer, if needed */
	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
		priv->urb->transfer_buffer =
			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
				GFP_KERNEL);
		if (!priv->urb->transfer_buffer) {
			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
			return;
		}
	}

	/* copy urb setup packet */
	priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
					  GFP_KERNEL);
	if (!priv->urb->setup_packet) {
		dev_err(&udev->dev, "allocate setup_packet\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	/* set other members from the base header of pdu */
	priv->urb->context = (void *) priv;
	priv->urb->dev = udev;
	priv->urb->pipe = pipe;
	priv->urb->complete = stub_complete;

	usbip_pack_pdu(pdu, priv->urb, USBIP_CMD_SUBMIT, 0);

	if (usbip_recv_xbuff(ud, priv->urb) < 0)
		return;

	if (usbip_recv_iso(ud, priv->urb) < 0)
		return;

	/* no need to submit an intercepted request, but harmless? */
	tweak_special_requests(priv->urb);

	masking_bogus_flags(priv->urb);

	/* urb is now ready to submit */
	ret = usb_submit_urb(priv->urb, GFP_KERNEL);
	if (ret == 0)
		usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
				  pdu->base.seqnum);
	else {
		dev_err(&udev->dev, "submit_urb error, %d\n", ret);
		usbip_dump_header(pdu);
		usbip_dump_urb(priv->urb);

		/*
		 * Pessimistic.
		 * This connection will be discarded.
		 */
		usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
	}

	usbip_dbg_stub_rx("Leave\n");
}

/* recv a pdu */
static void stub_rx_pdu(struct usbip_device *ud)
{
	int ret;
	struct usbip_header pdu;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
	struct device *dev = &sdev->udev->dev;

	usbip_dbg_stub_rx("Enter\n");

	memset(&pdu, 0, sizeof(pdu));

	/* receive a pdu header */
	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
	if (ret != sizeof(pdu)) {
		dev_err(dev, "recv a header, %d\n", ret);
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	usbip_header_correct_endian(&pdu, 0);
	if (usbip_dbg_flag_stub_rx)
		usbip_dump_header(&pdu);

	if (!valid_request(sdev, &pdu)) {
		dev_err(dev, "recv invalid request\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		return;
	}

	switch (pdu.base.command) {
	case USBIP_CMD_UNLINK:
		stub_recv_cmd_unlink(sdev, &pdu);
		break;
	case USBIP_CMD_SUBMIT:
		stub_recv_cmd_submit(sdev, &pdu);
		break;
	default:
		/* NOTREACHED */
		dev_err(dev, "unknown pdu\n");
		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
		break;
	}
}
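
/*
 * The rx kernel thread: keep receiving and dispatching pdus until an event
 * is flagged on the usbip_device or the thread is asked to stop.
 */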
int stub_rx_loop(void *data)
{
	struct usbip_device *ud = data;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		stub_rx_pdu(ud);
	}

	return 0;
}