/* msm_drv.c */

/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	2
#define MSM_VERSION_PATCHLEVEL	0

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};
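
/* Register an MMU with the driver.  Returns the index used to refer to this
 * address space later on, or a negative error code if the mmus[] table is
 * already full.
 */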
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

/*
 * Util/helpers:
 */

void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

	return ptr;
}
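
/* Register accessors; when the reglog module parameter is set, every read
 * and write is also logged to the kernel log.
 */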
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
	writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_ERR "IO:R %p %08x\n", addr, val);

	return val;
}
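
/* Vblank enable/disable requests can arrive from atomic (IRQ) context, so
 * they are queued on a list and handled later from a workqueue, where it is
 * safe to call into the kms enable_vblank()/disable_vblank() hooks.
 */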
struct vblank_event {
	struct list_head node;
	int crtc_id;
	bool enable;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
	struct msm_drm_private *priv = container_of(vbl_ctrl,
					struct msm_drm_private, vblank_ctrl);
	struct msm_kms *kms = priv->kms;
	struct vblank_event *vbl_ev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

		if (vbl_ev->enable)
			kms->funcs->enable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
						priv->crtcs[vbl_ev->crtc_id]);

		kfree(vbl_ev);

		spin_lock_irqsave(&vbl_ctrl->lock, flags);
	}
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					int crtc_id, bool enable)
{
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev;
	unsigned long flags;

	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
	if (!vbl_ev)
		return -ENOMEM;

	vbl_ev->crtc_id = crtc_id;
	vbl_ev->enable = enable;

	spin_lock_irqsave(&vbl_ctrl->lock, flags);
	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);

	queue_work(priv->wq, &vbl_ctrl->work);

	return 0;
}
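
/* Tear down everything set up by msm_drm_init(), in roughly reverse order. */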
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
	struct vblank_event *vbl_ev, *tmp;

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before drm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	cancel_work_sync(&vbl_ctrl->work);
	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
		list_del(&vbl_ev->node);
		kfree(vbl_ev);
	}

	msm_gem_shrinker_cleanup(ddev);

	drm_kms_helper_poll_fini(ddev);

	drm_dev_unregister(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev && priv->fbdev)
		msm_fbdev_free(ddev);
#endif
	drm_mode_config_cleanup(ddev);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	flush_workqueue(priv->atomic_wq);
	destroy_workqueue(priv->atomic_wq);

	if (kms && kms->funcs)
		kms->funcs->destroy(kms);

	if (gpu) {
		mutex_lock(&ddev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		mutex_unlock(&ddev->struct_mutex);
		gpu->funcs->destroy(gpu);
	}

	if (priv->vram.paddr) {
		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev, priv->vram.size, NULL,
			       priv->vram.paddr, attrs);
	}

	component_unbind_all(dev, ddev);

	msm_mdss_destroy(ddev);

	ddev->dev_private = NULL;
	drm_dev_unref(ddev);

	kfree(priv);

	return 0;
}

static int get_mdp_ver(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	return (int) (unsigned long) of_device_get_match_data(dev);
}

#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go.  There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire CMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!iommu_present(&platform_bus_type)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				    &priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
			 (uint32_t)priv->vram.paddr,
			 (uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}
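
/* Main driver init, called from msm_drm_bind() once all sub-components are
 * available: allocate the drm_device and driver private, bind the components,
 * set up VRAM, KMS, vblank and IRQ handling, and finally register the device.
 */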
static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		dev_err(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}

	platform_set_drvdata(pdev, ddev);
	ddev->platformdev = pdev;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		drm_dev_unref(ddev);
		return -ENOMEM;
	}

	ddev->dev_private = priv;
	priv->dev = ddev;

	ret = msm_mdss_init(ddev);
	if (ret) {
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	priv->wq = alloc_ordered_workqueue("msm", 0);
	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
	spin_lock_init(&priv->vblank_ctrl.lock);

	drm_mode_config_init(ddev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret) {
		msm_mdss_destroy(ddev);
		kfree(priv);
		drm_dev_unref(ddev);
		return ret;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto fail;

	msm_gem_shrinker_init(ddev);

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(ddev);
		priv->kms = kms;
		break;
	case 5:
		kms = mdp5_kms_init(ddev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	if (kms) {
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	ddev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev, "failed to initialize vblank\n");
		goto fail;
	}

	if (kms) {
		pm_runtime_get_sync(dev);
		ret = drm_irq_install(ddev, kms->irq);
		pm_runtime_put_sync(dev);
		if (ret < 0) {
			dev_err(dev, "failed to install IRQ handler\n");
			goto fail;
		}
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto fail;

	drm_mode_config_reset(ddev);

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (fbdev)
		priv->fbdev = msm_fbdev_init(ddev);
#endif

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(ddev);

	return 0;

fail:
	msm_drm_uninit(dev);
	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}

static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
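
/* IRQ handling is delegated to the kms implementation; the vblank on/off
 * hooks go through the deferred vblank_ctrl work queued above.
 */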
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	return kms->funcs->irq_postinstall(kms);
}

static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	BUG_ON(!kms);

	kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return -ENXIO;

	DBG("dev=%p, crtc=%u", dev, pipe);

	return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	if (!kms)
		return;

	DBG("dev=%p, crtc=%u", dev, pipe);

	vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	ktime_t timeout = to_ktime(args->timeout);

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_no_hw_counter,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplication of the same code as exynos or imx (or probably any other
 * componentized driver), so there is probably some room for common helpers.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0) {
			of_node_put(ep_node);
			continue;
		}

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf) {
			of_node_put(ep_node);
			continue;
		}

		component_match_add(master_dev, matchptr, compare_of, intf);

		of_node_put(intf);
		of_node_put(ep_node);
	}

	return 0;
}
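
/* Match any child device whose name contains "mdp"; used to locate the MDP5
 * device below the MDSS parent.
 */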
static int compare_name_mdp(struct device *dev, void *data)
{
	return (strstr(dev_name(dev), "mdp") != NULL);
}

static int add_display_components(struct device *dev,
				  struct component_match **matchptr)
{
	struct device *mdp_dev;
	int ret;

	/*
	 * MDP5 based devices don't have a flat hierarchy. There is a top level
	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
	 * children devices, find the MDP5 node, and then add the interfaces
	 * to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		component_match_add(dev, matchptr, compare_of,
				    mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}

/*
 * We don't know what the best binding to link the GPU with the DRM device is.
 * For now, we just hunt for all the possible GPUs that we support, and add
 * them as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	component_match_add(dev, matchptr, compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	ret = add_gpu_components(&pdev->dev, &match);
	if (ret)
		return ret;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)4 },	/* MDP4 */
	{ .compatible = "qcom,mdss", .data = (void *)5 },	/* MDP5 MDSS */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
};

static int __init msm_drm_register(void)
{
	DBG("init");
	msm_mdp_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");