udl_main.c

/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <drm/drmP.h>
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT HZ
#define FREE_URB_TIMEOUT (HZ*2)
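
/*
 * The DisplayLink vendor descriptor (type 0x5f) is a small key/length/value
 * blob read with usb_get_descriptor().  After the five byte header checked
 * below it carries keyed records; key 0x0200 reports the maximum pixel area
 * the chip's SKU may drive, which is stored in sku_pixel_limit and used
 * elsewhere in the driver to reject modes that exceed it.
 */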
static int udl_parse_vendor_descriptor(struct drm_device *dev,
                                       struct usb_device *usbdev)
{
        struct udl_device *udl = dev->dev_private;
        char *desc;
        char *buf;
        char *desc_end;

        u8 total_len = 0;

        buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
        if (!buf)
                return false;
        desc = buf;

        total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
                                       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
        if (total_len > 5) {
                DRM_INFO("vendor descriptor length:%x data:%11ph\n",
                         total_len, desc);

                if ((desc[0] != total_len) ||    /* descriptor length */
                    (desc[1] != 0x5f) ||         /* vendor descriptor type */
                    (desc[2] != 0x01) ||         /* version (2 bytes) */
                    (desc[3] != 0x00) ||
                    (desc[4] != total_len - 2))  /* length after type */
                        goto unrecognized;

                desc_end = desc + total_len;
                desc += 5; /* the fixed header we've already parsed */

                while (desc < desc_end) {
                        u8 length;
                        u16 key;

                        key = le16_to_cpu(*((u16 *) desc));
                        desc += sizeof(u16);
                        length = *desc;
                        desc++;

                        switch (key) {
                        case 0x0200: { /* max_area */
                                u32 max_area;
                                max_area = le32_to_cpu(*((u32 *)desc));
                                DRM_DEBUG("DL chip limited to %d pixel modes\n",
                                          max_area);
                                udl->sku_pixel_limit = max_area;
                                break;
                        }
                        default:
                                break;
                        }
                        desc += length;
                }
        }

        goto success;

unrecognized:
        /* allow udlfb to load for now even if firmware unrecognized */
        DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
        kfree(buf);
        return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
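/*
 * The 16-byte sequence below is the "standard channel" selection key (the
 * same one udlfb sends); it is written with vendor request
 * NR_USB_REQUEST_CHANNEL on the default control pipe.
 */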
static int udl_select_std_channel(struct udl_device *udl)
{
        int ret;
        u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
                            0x1C, 0x88, 0x5E, 0x15,
                            0x60, 0xFE, 0xC6, 0x97,
                            0x16, 0x3D, 0x47, 0xF2};

        ret = usb_control_msg(udl->udev,
                              usb_sndctrlpipe(udl->udev, 0),
                              NR_USB_REQUEST_CHANNEL,
                              (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
                              set_def_chn, sizeof(set_def_chn),
                              USB_CTRL_SET_TIMEOUT);
        return ret < 0 ? ret : 0;
}
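
/*
 * Render URBs live in a pre-allocated pool guarded by a counting semaphore
 * (urbs.limit_sem) and a spinlock-protected free list.  This delayed-work
 * helper exists so the semaphore can be released from process context
 * instead of directly from the completion handler (see the fb_defio note
 * in udl_urb_completion below).
 */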
static void udl_release_urb_work(struct work_struct *work)
{
        struct urb_node *unode = container_of(work, struct urb_node,
                                              release_urb_work.work);

        up(&unode->dev->urbs.limit_sem);
}
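
/*
 * Completion handler for render URBs: log anything other than an unlink
 * fault, set lost_pixels so the driver knows the screen contents may be
 * stale, then put the URB back on the free list and release its semaphore
 * slot.
 */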
void udl_urb_completion(struct urb *urb)
{
        struct urb_node *unode = urb->context;
        struct udl_device *udl = unode->dev;
        unsigned long flags;

        /* sync/async unlink faults aren't errors */
        if (urb->status) {
                if (!(urb->status == -ENOENT ||
                      urb->status == -ECONNRESET ||
                      urb->status == -ESHUTDOWN)) {
                        DRM_ERROR("%s - nonzero write bulk status received: %d\n",
                                  __func__, urb->status);
                        atomic_set(&udl->lost_pixels, 1);
                }
        }

        urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

        spin_lock_irqsave(&udl->urbs.lock, flags);
        list_add_tail(&unode->entry, &udl->urbs.list);
        udl->urbs.available++;
        spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
        /*
         * When using fb_defio, we deadlock if up() is called
         * while another is waiting. So queue to another process.
         */
        if (fb_defio)
                schedule_delayed_work(&unode->release_urb_work, 0);
        else
#endif
                up(&udl->urbs.limit_sem);
}
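
/*
 * Tear down the URB pool.  Each iteration takes one slot of the counting
 * semaphore (so only URBs that are no longer in flight are touched), pulls
 * a node off the free list and frees its coherent buffer, the URB itself
 * and the node.
 */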
static void udl_free_urb_list(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;
        int count = udl->urbs.count;
        struct list_head *node;
        struct urb_node *unode;
        struct urb *urb;
        int ret;
        unsigned long flags;

        DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

        /* keep waiting and freeing, until we've got 'em all */
        while (count--) {
                /* Getting interrupted means a leak, but ok at shutdown */
                ret = down_interruptible(&udl->urbs.limit_sem);
                if (ret)
                        break;

                spin_lock_irqsave(&udl->urbs.lock, flags);

                node = udl->urbs.list.next; /* have reserved one with sem */
                list_del_init(node);

                spin_unlock_irqrestore(&udl->urbs.lock, flags);

                unode = list_entry(node, struct urb_node, entry);
                urb = unode->urb;

                /* Free each separately allocated piece */
                usb_free_coherent(urb->dev, udl->urbs.size,
                                  urb->transfer_buffer, urb->transfer_dma);
                usb_free_urb(urb);
                kfree(node);
        }
        udl->urbs.count = 0;
}
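
/*
 * Pre-allocate 'count' render URBs, each backed by a 'size' byte
 * USB-coherent transfer buffer.  The counting semaphore and 'available'
 * counter are initialised to however many allocations actually succeeded,
 * and that number is returned (0 means total failure).
 */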
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
        struct udl_device *udl = dev->dev_private;
        int i = 0;
        struct urb *urb;
        struct urb_node *unode;
        char *buf;

        spin_lock_init(&udl->urbs.lock);

        udl->urbs.size = size;
        INIT_LIST_HEAD(&udl->urbs.list);

        while (i < count) {
                unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
                if (!unode)
                        break;
                unode->dev = udl;

                INIT_DELAYED_WORK(&unode->release_urb_work,
                                  udl_release_urb_work);

                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb) {
                        kfree(unode);
                        break;
                }
                unode->urb = urb;

                buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
                                         &urb->transfer_dma);
                if (!buf) {
                        kfree(unode);
                        usb_free_urb(urb);
                        break;
                }

                /* urb->transfer_buffer_length set to actual before submit */
                usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
                                  buf, size, udl_urb_completion, unode);
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

                list_add_tail(&unode->entry, &udl->urbs.list);

                i++;
        }

        sema_init(&udl->urbs.limit_sem, i);
        udl->urbs.count = i;
        udl->urbs.available = i;

        DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);

        return i;
}
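
/*
 * Claim a free render URB for the caller, waiting up to GET_URB_TIMEOUT for
 * one of the in-flight buffers to complete.  Returns NULL (and sets
 * lost_pixels) if the wait times out or is interrupted.
 */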
struct urb *udl_get_urb(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;
        int ret = 0;
        struct list_head *entry;
        struct urb_node *unode;
        struct urb *urb = NULL;
        unsigned long flags;

        /* Wait for an in-flight buffer to complete and get re-queued */
        ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
        if (ret) {
                atomic_set(&udl->lost_pixels, 1);
                DRM_INFO("wait for urb interrupted: %x available: %d\n",
                         ret, udl->urbs.available);
                goto error;
        }

        spin_lock_irqsave(&udl->urbs.lock, flags);

        BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
        entry = udl->urbs.list.next;
        list_del_init(entry);
        udl->urbs.available--;

        spin_unlock_irqrestore(&udl->urbs.lock, flags);

        unode = list_entry(entry, struct urb_node, entry);
        urb = unode->urb;

error:
        return urb;
}
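
/*
 * Hand a filled URB (obtained from udl_get_urb) to the USB core.  'len' is
 * the actual payload length and must not exceed the pool buffer size.  On a
 * submission error the URB is recycled immediately via udl_urb_completion
 * so the pool does not leak.
 */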
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
        struct udl_device *udl = dev->dev_private;
        int ret;

        BUG_ON(len > udl->urbs.size);

        urb->transfer_buffer_length = len; /* set to actual payload len */
        ret = usb_submit_urb(urb, GFP_ATOMIC);
        if (ret) {
                udl_urb_completion(urb); /* because no one else will */
                atomic_set(&udl->lost_pixels, 1);
                DRM_ERROR("usb_submit_urb error %x\n", ret);
        }
        return ret;
}
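
/*
 * DRM load callback.  The owning usb_device is passed in through the
 * 'flags' argument by the USB probe path (udl_drv.c); here we parse the
 * vendor descriptor, select the standard channel, allocate the render URB
 * pool and bring up modesetting, the fbdev emulation and vblank support.
 */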
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct usb_device *udev = (void *)flags;
        struct udl_device *udl;
        int ret = -ENOMEM;

        DRM_DEBUG("\n");
        udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
        if (!udl)
                return -ENOMEM;

        udl->udev = udev;
        udl->ddev = dev;
        dev->dev_private = udl;

        if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
                ret = -ENODEV;
                DRM_ERROR("firmware not recognized. Assume incompatible device\n");
                goto err;
        }

        if (udl_select_std_channel(udl))
                DRM_ERROR("Selecting channel failed\n");

        if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
                DRM_ERROR("udl_alloc_urb_list failed\n");
                goto err;
        }

        DRM_DEBUG("\n");
        ret = udl_modeset_init(dev);
        if (ret)
                goto err;

        ret = udl_fbdev_init(dev);
        if (ret)
                goto err;

        ret = drm_vblank_init(dev, 1);
        if (ret)
                goto err_fb;

        return 0;

err_fb:
        udl_fbdev_cleanup(dev);
err:
        if (udl->urbs.count)
                udl_free_urb_list(dev);
        kfree(udl);
        DRM_ERROR("%d\n", ret);
        return ret;
}
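
/*
 * Called from the USB disconnect path (udl_drv.c) to drop all render URBs
 * as soon as the device goes away; the remaining teardown happens later in
 * udl_driver_unload.
 */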
int udl_drop_usb(struct drm_device *dev)
{
        udl_free_urb_list(dev);
        return 0;
}
int udl_driver_unload(struct drm_device *dev)
{
        struct udl_device *udl = dev->dev_private;

        drm_vblank_cleanup(dev);

        if (udl->urbs.count)
                udl_free_urb_list(dev);

        udl_fbdev_cleanup(dev);
        udl_modeset_cleanup(dev);
        kfree(udl);
        return 0;
}