udl_main.c 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385
  1. /*
  2. * Copyright (C) 2012 Red Hat
  3. *
  4. * based in parts on udlfb.c:
  5. * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
  6. * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
  7. * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
  8. *
  9. * This file is subject to the terms and conditions of the GNU General Public
  10. * License v2. See the file COPYING in the main directory of this archive for
  11. * more details.
  12. */
  13. #include <drm/drmP.h>
  14. #include <drm/drm_crtc_helper.h>
  15. #include "udl_drv.h"
  16. /* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
  17. #define BULK_SIZE 512
  18. #define NR_USB_REQUEST_CHANNEL 0x12
  19. #define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
  20. #define WRITES_IN_FLIGHT (4)
  21. #define MAX_VENDOR_DESCRIPTOR_SIZE 256
  22. #define GET_URB_TIMEOUT HZ
  23. #define FREE_URB_TIMEOUT (HZ*2)
  24. static int udl_parse_vendor_descriptor(struct drm_device *dev,
  25. struct usb_device *usbdev)
  26. {
  27. struct udl_device *udl = dev->dev_private;
  28. char *desc;
  29. char *buf;
  30. char *desc_end;
  31. u8 total_len = 0;
  32. buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
  33. if (!buf)
  34. return false;
  35. desc = buf;
  36. total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
  37. 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
  38. if (total_len > 5) {
  39. DRM_INFO("vendor descriptor length:%x data:%11ph\n",
  40. total_len, desc);
  41. if ((desc[0] != total_len) || /* descriptor length */
  42. (desc[1] != 0x5f) || /* vendor descriptor type */
  43. (desc[2] != 0x01) || /* version (2 bytes) */
  44. (desc[3] != 0x00) ||
  45. (desc[4] != total_len - 2)) /* length after type */
  46. goto unrecognized;
  47. desc_end = desc + total_len;
  48. desc += 5; /* the fixed header we've already parsed */
  49. while (desc < desc_end) {
  50. u8 length;
  51. u16 key;
  52. key = le16_to_cpu(*((u16 *) desc));
  53. desc += sizeof(u16);
  54. length = *desc;
  55. desc++;
  56. switch (key) {
  57. case 0x0200: { /* max_area */
  58. u32 max_area;
  59. max_area = le32_to_cpu(*((u32 *)desc));
  60. DRM_DEBUG("DL chip limited to %d pixel modes\n",
  61. max_area);
  62. udl->sku_pixel_limit = max_area;
  63. break;
  64. }
  65. default:
  66. break;
  67. }
  68. desc += length;
  69. }
  70. }
  71. goto success;
  72. unrecognized:
  73. /* allow udlfb to load for now even if firmware unrecognized */
  74. DRM_ERROR("Unrecognized vendor firmware descriptor\n");
  75. success:
  76. kfree(buf);
  77. return true;
  78. }
  79. /*
  80. * Need to ensure a channel is selected before submitting URBs
  81. */
  82. static int udl_select_std_channel(struct udl_device *udl)
  83. {
  84. int ret;
  85. static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
  86. 0x1C, 0x88, 0x5E, 0x15,
  87. 0x60, 0xFE, 0xC6, 0x97,
  88. 0x16, 0x3D, 0x47, 0xF2};
  89. void *sendbuf;
  90. sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
  91. if (!sendbuf)
  92. return -ENOMEM;
  93. ret = usb_control_msg(udl->udev,
  94. usb_sndctrlpipe(udl->udev, 0),
  95. NR_USB_REQUEST_CHANNEL,
  96. (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
  97. sendbuf, sizeof(set_def_chn),
  98. USB_CTRL_SET_TIMEOUT);
  99. kfree(sendbuf);
  100. return ret < 0 ? ret : 0;
  101. }
  102. static void udl_release_urb_work(struct work_struct *work)
  103. {
  104. struct urb_node *unode = container_of(work, struct urb_node,
  105. release_urb_work.work);
  106. up(&unode->dev->urbs.limit_sem);
  107. }
  108. void udl_urb_completion(struct urb *urb)
  109. {
  110. struct urb_node *unode = urb->context;
  111. struct udl_device *udl = unode->dev;
  112. unsigned long flags;
  113. /* sync/async unlink faults aren't errors */
  114. if (urb->status) {
  115. if (!(urb->status == -ENOENT ||
  116. urb->status == -ECONNRESET ||
  117. urb->status == -ESHUTDOWN)) {
  118. DRM_ERROR("%s - nonzero write bulk status received: %d\n",
  119. __func__, urb->status);
  120. atomic_set(&udl->lost_pixels, 1);
  121. }
  122. }
  123. urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
  124. spin_lock_irqsave(&udl->urbs.lock, flags);
  125. list_add_tail(&unode->entry, &udl->urbs.list);
  126. udl->urbs.available++;
  127. spin_unlock_irqrestore(&udl->urbs.lock, flags);
  128. #if 0
  129. /*
  130. * When using fb_defio, we deadlock if up() is called
  131. * while another is waiting. So queue to another process.
  132. */
  133. if (fb_defio)
  134. schedule_delayed_work(&unode->release_urb_work, 0);
  135. else
  136. #endif
  137. up(&udl->urbs.limit_sem);
  138. }
  139. static void udl_free_urb_list(struct drm_device *dev)
  140. {
  141. struct udl_device *udl = dev->dev_private;
  142. int count = udl->urbs.count;
  143. struct list_head *node;
  144. struct urb_node *unode;
  145. struct urb *urb;
  146. int ret;
  147. unsigned long flags;
  148. DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
  149. /* keep waiting and freeing, until we've got 'em all */
  150. while (count--) {
  151. /* Getting interrupted means a leak, but ok at shutdown*/
  152. ret = down_interruptible(&udl->urbs.limit_sem);
  153. if (ret)
  154. break;
  155. spin_lock_irqsave(&udl->urbs.lock, flags);
  156. node = udl->urbs.list.next; /* have reserved one with sem */
  157. list_del_init(node);
  158. spin_unlock_irqrestore(&udl->urbs.lock, flags);
  159. unode = list_entry(node, struct urb_node, entry);
  160. urb = unode->urb;
  161. /* Free each separately allocated piece */
  162. usb_free_coherent(urb->dev, udl->urbs.size,
  163. urb->transfer_buffer, urb->transfer_dma);
  164. usb_free_urb(urb);
  165. kfree(node);
  166. }
  167. udl->urbs.count = 0;
  168. }
  169. static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
  170. {
  171. struct udl_device *udl = dev->dev_private;
  172. int i = 0;
  173. struct urb *urb;
  174. struct urb_node *unode;
  175. char *buf;
  176. spin_lock_init(&udl->urbs.lock);
  177. udl->urbs.size = size;
  178. INIT_LIST_HEAD(&udl->urbs.list);
  179. while (i < count) {
  180. unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
  181. if (!unode)
  182. break;
  183. unode->dev = udl;
  184. INIT_DELAYED_WORK(&unode->release_urb_work,
  185. udl_release_urb_work);
  186. urb = usb_alloc_urb(0, GFP_KERNEL);
  187. if (!urb) {
  188. kfree(unode);
  189. break;
  190. }
  191. unode->urb = urb;
  192. buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
  193. &urb->transfer_dma);
  194. if (!buf) {
  195. kfree(unode);
  196. usb_free_urb(urb);
  197. break;
  198. }
  199. /* urb->transfer_buffer_length set to actual before submit */
  200. usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
  201. buf, size, udl_urb_completion, unode);
  202. urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
  203. list_add_tail(&unode->entry, &udl->urbs.list);
  204. i++;
  205. }
  206. sema_init(&udl->urbs.limit_sem, i);
  207. udl->urbs.count = i;
  208. udl->urbs.available = i;
  209. DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
  210. return i;
  211. }
  212. struct urb *udl_get_urb(struct drm_device *dev)
  213. {
  214. struct udl_device *udl = dev->dev_private;
  215. int ret = 0;
  216. struct list_head *entry;
  217. struct urb_node *unode;
  218. struct urb *urb = NULL;
  219. unsigned long flags;
  220. /* Wait for an in-flight buffer to complete and get re-queued */
  221. ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
  222. if (ret) {
  223. atomic_set(&udl->lost_pixels, 1);
  224. DRM_INFO("wait for urb interrupted: %x available: %d\n",
  225. ret, udl->urbs.available);
  226. goto error;
  227. }
  228. spin_lock_irqsave(&udl->urbs.lock, flags);
  229. BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
  230. entry = udl->urbs.list.next;
  231. list_del_init(entry);
  232. udl->urbs.available--;
  233. spin_unlock_irqrestore(&udl->urbs.lock, flags);
  234. unode = list_entry(entry, struct urb_node, entry);
  235. urb = unode->urb;
  236. error:
  237. return urb;
  238. }
  239. int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
  240. {
  241. struct udl_device *udl = dev->dev_private;
  242. int ret;
  243. BUG_ON(len > udl->urbs.size);
  244. urb->transfer_buffer_length = len; /* set to actual payload len */
  245. ret = usb_submit_urb(urb, GFP_ATOMIC);
  246. if (ret) {
  247. udl_urb_completion(urb); /* because no one else will */
  248. atomic_set(&udl->lost_pixels, 1);
  249. DRM_ERROR("usb_submit_urb error %x\n", ret);
  250. }
  251. return ret;
  252. }
  253. int udl_driver_load(struct drm_device *dev, unsigned long flags)
  254. {
  255. struct usb_device *udev = (void*)flags;
  256. struct udl_device *udl;
  257. int ret = -ENOMEM;
  258. DRM_DEBUG("\n");
  259. udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
  260. if (!udl)
  261. return -ENOMEM;
  262. udl->udev = udev;
  263. udl->ddev = dev;
  264. dev->dev_private = udl;
  265. if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
  266. ret = -ENODEV;
  267. DRM_ERROR("firmware not recognized. Assume incompatible device\n");
  268. goto err;
  269. }
  270. if (udl_select_std_channel(udl))
  271. DRM_ERROR("Selecting channel failed\n");
  272. if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
  273. DRM_ERROR("udl_alloc_urb_list failed\n");
  274. goto err;
  275. }
  276. DRM_DEBUG("\n");
  277. ret = udl_modeset_init(dev);
  278. if (ret)
  279. goto err;
  280. ret = udl_fbdev_init(dev);
  281. if (ret)
  282. goto err;
  283. ret = drm_vblank_init(dev, 1);
  284. if (ret)
  285. goto err_fb;
  286. drm_kms_helper_poll_init(dev);
  287. return 0;
  288. err_fb:
  289. udl_fbdev_cleanup(dev);
  290. err:
  291. if (udl->urbs.count)
  292. udl_free_urb_list(dev);
  293. kfree(udl);
  294. DRM_ERROR("%d\n", ret);
  295. return ret;
  296. }
/*
 * Called when the backing USB device goes away: release every queued
 * transfer URB and its DMA buffer. Always returns 0.
 */
int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}
/*
 * DRM unload callback — tears down in reverse order of udl_driver_load():
 * stop connector polling first (so no new work is queued), drain and free
 * the URB pool if any urbs remain, then remove fbdev and modeset state
 * before freeing the per-device structure.
 */
void udl_driver_unload(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;

	drm_kms_helper_poll_fini(dev);

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
	udl_modeset_cleanup(dev);
	kfree(udl);
}