msm_drv.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882
  1. /*
  2. * Copyright (C) 2013 Red Hat
  3. * Author: Rob Clark <robdclark@gmail.com>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include "msm_drv.h"
  18. #include "msm_gpu.h"
  19. #include "msm_kms.h"
  20. static void msm_fb_output_poll_changed(struct drm_device *dev)
  21. {
  22. struct msm_drm_private *priv = dev->dev_private;
  23. if (priv->fbdev)
  24. drm_fb_helper_hotplug_event(priv->fbdev);
  25. }
/* Mode-config hooks: framebuffer creation + hotplug propagation to fbdev. */
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
};
  30. int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
  31. {
  32. struct msm_drm_private *priv = dev->dev_private;
  33. int idx = priv->num_mmus++;
  34. if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
  35. return -EINVAL;
  36. priv->mmus[idx] = mmu;
  37. return idx;
  38. }
  39. #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
  40. static bool reglog = false;
  41. MODULE_PARM_DESC(reglog, "Enable register read/write logging");
  42. module_param(reglog, bool, 0600);
  43. #else
  44. #define reglog 0
  45. #endif
  46. static char *vram;
  47. MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU");
  48. module_param(vram, charp, 0);
  49. void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
  50. const char *dbgname)
  51. {
  52. struct resource *res;
  53. unsigned long size;
  54. void __iomem *ptr;
  55. if (name)
  56. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  57. else
  58. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  59. if (!res) {
  60. dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
  61. return ERR_PTR(-EINVAL);
  62. }
  63. size = resource_size(res);
  64. ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
  65. if (!ptr) {
  66. dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
  67. return ERR_PTR(-ENOMEM);
  68. }
  69. if (reglog)
  70. printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
  71. return ptr;
  72. }
  73. void msm_writel(u32 data, void __iomem *addr)
  74. {
  75. if (reglog)
  76. printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
  77. writel(data, addr);
  78. }
  79. u32 msm_readl(const void __iomem *addr)
  80. {
  81. u32 val = readl(addr);
  82. if (reglog)
  83. printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
  84. return val;
  85. }
  86. /*
  87. * DRM operations:
  88. */
/*
 * Driver unload: tear down in (roughly) reverse order of msm_load().
 * Statement order matters here: the poll worker is stopped before mode
 * config teardown, the irq handler is removed while the device is
 * runtime-resumed, and the workqueue is drained before the KMS/GPU
 * objects its work items may reference are destroyed.
 */
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	/* device must be powered while the irq handler is torn down: */
	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		/* GPU suspend/destroy are done under struct_mutex: */
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		/* vaddr is NULL: the carveout was allocated with
		 * DMA_ATTR_NO_KERNEL_MAPPING (see msm_load()):
		 */
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
  123. static int get_mdp_ver(struct platform_device *pdev)
  124. {
  125. #ifdef CONFIG_OF
  126. const static struct of_device_id match_types[] = { {
  127. .compatible = "qcom,mdss_mdp",
  128. .data = (void *)5,
  129. }, {
  130. /* end node */
  131. } };
  132. struct device *dev = &pdev->dev;
  133. const struct of_device_id *match;
  134. match = of_match_node(match_types, dev->of_node);
  135. if (match)
  136. return (int)match->data;
  137. #endif
  138. return 4;
  139. }
/*
 * Driver load: allocate driver-private state, set up the (optional)
 * VRAM carveout for IOMMU-less platforms, probe the KMS backend
 * (MDP4/MDP5), then wire up vblank, the irq handler and fbdev.
 * Any failure falls through to msm_unload() for teardown.
 */
static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	/* NOTE(review): alloc_ordered_workqueue() can return NULL and the
	 * result is not checked here -- confirm and consider failing load.
	 */
	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DEFINE_DMA_ATTRS(attrs);
		unsigned long size;
		void *p;

		DBG("using %s VRAM carveout", vram);
		/* NOTE(review): if the 'vram' module param was never set this
		 * hands NULL to memparse() -- verify this path is only taken
		 * with vram configured.
		 */
		size = memparse(vram, NULL);
		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, 0, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			ret = -ENOMEM;
			goto fail;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	/* MDP generation comes from DT, defaulting to 4 (get_mdp_ver()): */
	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	/* keep the device powered while the irq handler is installed: */
	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	platform_set_drvdata(pdev, dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	priv->fbdev = msm_fbdev_init(dev);
#endif

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}
/*
 * Lazily create and initialize the a3xx GPU.  Called from msm_open();
 * failure is deliberately non-fatal -- priv->gpu stays NULL and the
 * GPU ioctls report -ENXIO.
 */
static void load_gpu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu;

	if (priv->gpu)
		return;

	/* a3xx_gpu_init() is invoked under struct_mutex: */
	mutex_lock(&dev->struct_mutex);
	gpu = a3xx_gpu_init(dev);
	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a3xx gpu\n");
		gpu = NULL;
		/* not fatal */
	}
	mutex_unlock(&dev->struct_mutex);

	if (gpu) {
		int ret;
		gpu->funcs->pm_resume(gpu);
		ret = gpu->funcs->hw_init(gpu);
		if (ret) {
			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
			gpu->funcs->destroy(gpu);
			gpu = NULL;
		}
	}

	/* NOTE(review): priv->gpu is read before the lock and written after
	 * it is dropped; concurrent opens could race here -- confirm how
	 * open() calls are serialized.
	 */
	priv->gpu = gpu;
}
  270. static int msm_open(struct drm_device *dev, struct drm_file *file)
  271. {
  272. struct msm_file_private *ctx;
  273. /* For now, load gpu on open.. to avoid the requirement of having
  274. * firmware in the initrd.
  275. */
  276. load_gpu(dev);
  277. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  278. if (!ctx)
  279. return -ENOMEM;
  280. file->driver_priv = ctx;
  281. return 0;
  282. }
  283. static void msm_preclose(struct drm_device *dev, struct drm_file *file)
  284. {
  285. struct msm_drm_private *priv = dev->dev_private;
  286. struct msm_file_private *ctx = file->driver_priv;
  287. struct msm_kms *kms = priv->kms;
  288. if (kms)
  289. kms->funcs->preclose(kms, file);
  290. mutex_lock(&dev->struct_mutex);
  291. if (ctx == priv->lastctx)
  292. priv->lastctx = NULL;
  293. mutex_unlock(&dev->struct_mutex);
  294. kfree(ctx);
  295. }
  296. static void msm_lastclose(struct drm_device *dev)
  297. {
  298. struct msm_drm_private *priv = dev->dev_private;
  299. if (priv->fbdev) {
  300. drm_modeset_lock_all(dev);
  301. drm_fb_helper_restore_fbdev_mode(priv->fbdev);
  302. drm_modeset_unlock_all(dev);
  303. }
  304. }
/* IRQ entry points: all of these trampoline straight into the KMS
 * backend.  BUG_ON(!kms) is safe because drm_irq_install() is only
 * called after a KMS was successfully created in msm_load().
 */
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}

/* Return value is forwarded from the KMS backend (0 on success). */
static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}
/* Enable vblank interrupts for the given crtc; -ENXIO without a KMS. */
static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}

/* Disable vblank interrupts for the given crtc; no-op without a KMS. */
static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}
  352. /*
  353. * DRM debugfs:
  354. */
#ifdef CONFIG_DEBUG_FS
/* Dump GPU status via the backend's ->show(); no-op if no GPU loaded. */
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}

/* List GEM objects: the GPU's active list (if any), then inactive ones. */
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}

/* Dump the drm_mm allocator state. */
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, dev->mm_private);
}

/* Describe all framebuffers, tagging the fbdev one as "fbcon" and
 * skipping it in the user-fb walk below so it is not listed twice.
 */
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;
		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

/* debugfs dispatcher: run the per-file show function under struct_mutex. */
static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/* debugfs files; .data carries the show function for show_locked(). */
static struct drm_info_list msm_debugfs_list[] = {
	{"gpu", show_locked, 0, msm_gpu_show},
	{"gem", show_locked, 0, msm_gem_show},
	{ "mm", show_locked, 0, msm_mm_show },
	{ "fb", show_locked, 0, msm_fb_show },
};

/* Install the files above for a drm minor. */
static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return ret;
}

static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
}
#endif
  440. /*
  441. * Fences:
  442. */
/*
 * Wait (interruptibly) for a GPU fence to complete.
 *
 * timeout == NULL means "poll": -EBUSY if not yet completed.
 * Returns 0 on success (or when there is no GPU at all), -EINVAL for a
 * fence that was never submitted, -ETIMEDOUT on timeout, -ERESTARTSYS
 * if interrupted by a signal.
 */
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		/* NOTE(review): 'timeout' is converted with
		 * timespec_to_jiffies() and compared against the current
		 * jiffies count, i.e. it is treated as an absolute
		 * deadline -- confirm the ioctl contract (absolute vs
		 * relative) matches this.
		 */
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			/* timed out before the fence completed: */
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			/* positive return == woke with fence completed: */
			ret = 0;
		}
	}

	return ret;
}
/* called from workqueue */
/*
 * Advance the completed-fence counter and queue any registered fence
 * callbacks whose fence has now completed, then wake fence waiters.
 * The loop stops at the first callback whose fence is still pending --
 * assumes fence_cbs is ordered by fence number (TODO: confirm at the
 * insertion site).
 */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}
  497. void __msm_fence_worker(struct work_struct *work)
  498. {
  499. struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
  500. cb->func(cb);
  501. }
  502. /*
  503. * DRM ioctls:
  504. */
  505. static int msm_ioctl_get_param(struct drm_device *dev, void *data,
  506. struct drm_file *file)
  507. {
  508. struct msm_drm_private *priv = dev->dev_private;
  509. struct drm_msm_param *args = data;
  510. struct msm_gpu *gpu;
  511. /* for now, we just have 3d pipe.. eventually this would need to
  512. * be more clever to dispatch to appropriate gpu module:
  513. */
  514. if (args->pipe != MSM_PIPE_3D0)
  515. return -EINVAL;
  516. gpu = priv->gpu;
  517. if (!gpu)
  518. return -ENXIO;
  519. return gpu->funcs->get_param(gpu, args->param, &args->value);
  520. }
  521. static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
  522. struct drm_file *file)
  523. {
  524. struct drm_msm_gem_new *args = data;
  525. return msm_gem_new_handle(dev, file, args->size,
  526. args->flags, &args->handle);
  527. }
/* Build a kernel struct timespec from the uapi's (sec, nsec) pair.
 * This expands to a compound literal, so taking &TS(x) is valid.
 */
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })

/* MSM_GEM_CPU_PREP ioctl: prepare a GEM bo for CPU access with the
 * given op/timeout (semantics implemented by msm_gem_cpu_prep()).
 */
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
  542. static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  543. struct drm_file *file)
  544. {
  545. struct drm_msm_gem_cpu_fini *args = data;
  546. struct drm_gem_object *obj;
  547. int ret;
  548. obj = drm_gem_object_lookup(dev, file, args->handle);
  549. if (!obj)
  550. return -ENOENT;
  551. ret = msm_gem_cpu_fini(obj);
  552. drm_gem_object_unreference_unlocked(obj);
  553. return ret;
  554. }
  555. static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
  556. struct drm_file *file)
  557. {
  558. struct drm_msm_gem_info *args = data;
  559. struct drm_gem_object *obj;
  560. int ret = 0;
  561. if (args->pad)
  562. return -EINVAL;
  563. obj = drm_gem_object_lookup(dev, file, args->handle);
  564. if (!obj)
  565. return -ENOENT;
  566. args->offset = msm_gem_mmap_offset(obj);
  567. drm_gem_object_unreference_unlocked(obj);
  568. return ret;
  569. }
  570. static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
  571. struct drm_file *file)
  572. {
  573. struct drm_msm_wait_fence *args = data;
  574. return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
  575. }
/* ioctl table: all entries unlocked, authenticated, render-node capable. */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};

/* GEM mmap vm_ops: pages are faulted in lazily via msm_gem_fault(). */
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/* Standard drm fops, except mmap which goes through the GEM path. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};
/* Main drm_driver descriptor: GEM + KMS + prime + render-node support.
 * Dumb-buffer and most prime hooks use the drm core helpers; GEM
 * object lifetime and sg-table import/export are msm-specific.
 */
static struct drm_driver msm_driver = {
	.driver_features = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_MODESET,
	.load = msm_load,
	.unload = msm_unload,
	.open = msm_open,
	.preclose = msm_preclose,
	.lastclose = msm_lastclose,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = msm_debugfs_init,
	.debugfs_cleanup = msm_debugfs_cleanup,
#endif
	.ioctls = msm_ioctls,
	.num_ioctls = DRM_MSM_NUM_IOCTLS,
	.fops = &fops,
	.name = "msm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = 1,
	.minor = 0,
};
#ifdef CONFIG_PM_SLEEP
/* System suspend: stop the connector output-poll worker. */
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

/* System resume: restart the connector output-poll worker. */
static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
  666. /*
  667. * Platform driver:
  668. */
/* Platform probe: set a 32-bit coherent DMA mask (used by the VRAM
 * carveout's dma_alloc_attrs() path) and register with the DRM core,
 * which calls back into msm_load().
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return drm_platform_init(&msm_driver, pdev);
}

/* Platform remove: release the drm device registered at probe time. */
static int msm_pdev_remove(struct platform_device *pdev)
{
	drm_put_dev(platform_get_drvdata(pdev));

	return 0;
}
/* Legacy (non-DT) platform-bus id table. */
static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};

/* Device-tree match table (the same compatible is used by get_mdp_ver()
 * to select MDP5).
 */
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdss_mdp" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "msm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
	},
	.id_table = msm_id,
};
/* Module init: the hdmi and a3xx sub-drivers are registered before the
 * main platform driver -- presumably so they are available when it
 * binds (confirm against the sub-driver probe paths).
 */
static int __init msm_drm_register(void)
{
	DBG("init");
	hdmi_register();
	a3xx_register();
	return platform_driver_register(&msm_platform_driver);
}

/* Module exit: unregister in reverse order of msm_drm_register(). */
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	a3xx_unregister();
}
  713. module_init(msm_drm_register);
  714. module_exit(msm_drm_unregister);
  715. MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
  716. MODULE_DESCRIPTION("MSM DRM Driver");
  717. MODULE_LICENSE("GPL");