etnaviv_drv.c
  1. /*
  2. * Copyright (C) 2015 Etnaviv Project
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License version 2 as published by
  6. * the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program. If not, see <http://www.gnu.org/licenses/>.
  15. */
  16. #include <linux/component.h>
  17. #include <linux/of_platform.h>
  18. #include <drm/drm_of.h>
  19. #include "etnaviv_cmdbuf.h"
  20. #include "etnaviv_drv.h"
  21. #include "etnaviv_gpu.h"
  22. #include "etnaviv_gem.h"
  23. #include "etnaviv_mmu.h"
  24. #include "etnaviv_perfmon.h"
/*
 * Optional register read/write logging, gated at build time.  When
 * CONFIG_DRM_ETNAVIV_REGISTER_LOGGING is disabled, "reglog" compiles
 * away to a constant 0 so the logging branches are dead code.
 */
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
  32. void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
  33. const char *dbgname)
  34. {
  35. struct resource *res;
  36. void __iomem *ptr;
  37. if (name)
  38. res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
  39. else
  40. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  41. ptr = devm_ioremap_resource(&pdev->dev, res);
  42. if (IS_ERR(ptr)) {
  43. dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
  44. PTR_ERR(ptr));
  45. return ptr;
  46. }
  47. if (reglog)
  48. dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
  49. dbgname, ptr, (size_t)resource_size(res));
  50. return ptr;
  51. }
/* Register write wrapper: optionally log, then write @data to @addr. */
void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}
/* Register read wrapper: read @addr and optionally log the value. */
u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}
  65. /*
  66. * DRM operations:
  67. */
  68. static void load_gpu(struct drm_device *dev)
  69. {
  70. struct etnaviv_drm_private *priv = dev->dev_private;
  71. unsigned int i;
  72. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  73. struct etnaviv_gpu *g = priv->gpu[i];
  74. if (g) {
  75. int ret;
  76. ret = etnaviv_gpu_init(g);
  77. if (ret)
  78. priv->gpu[i] = NULL;
  79. }
  80. }
  81. }
  82. static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
  83. {
  84. struct etnaviv_drm_private *priv = dev->dev_private;
  85. struct etnaviv_file_private *ctx;
  86. int i;
  87. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  88. if (!ctx)
  89. return -ENOMEM;
  90. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  91. struct etnaviv_gpu *gpu = priv->gpu[i];
  92. if (gpu) {
  93. drm_sched_entity_init(&gpu->sched,
  94. &ctx->sched_entity[i],
  95. &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
  96. 32, NULL);
  97. }
  98. }
  99. file->driver_priv = ctx;
  100. return 0;
  101. }
  102. static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
  103. {
  104. struct etnaviv_drm_private *priv = dev->dev_private;
  105. struct etnaviv_file_private *ctx = file->driver_priv;
  106. unsigned int i;
  107. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  108. struct etnaviv_gpu *gpu = priv->gpu[i];
  109. if (gpu) {
  110. mutex_lock(&gpu->lock);
  111. if (gpu->lastctx == ctx)
  112. gpu->lastctx = NULL;
  113. mutex_unlock(&gpu->lock);
  114. drm_sched_entity_fini(&gpu->sched,
  115. &ctx->sched_entity[i]);
  116. }
  117. }
  118. kfree(ctx);
  119. }
  120. /*
  121. * DRM debugfs:
  122. */
  123. #ifdef CONFIG_DEBUG_FS
/* debugfs "gem": describe every GEM object tracked by the driver. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
  130. static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
  131. {
  132. struct drm_printer p = drm_seq_file_printer(m);
  133. read_lock(&dev->vma_offset_manager->vm_lock);
  134. drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
  135. read_unlock(&dev->vma_offset_manager->vm_lock);
  136. return 0;
  137. }
  138. static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
  139. {
  140. struct drm_printer p = drm_seq_file_printer(m);
  141. seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
  142. mutex_lock(&gpu->mmu->lock);
  143. drm_mm_print(&gpu->mmu->mm, &p);
  144. mutex_unlock(&gpu->mmu->lock);
  145. return 0;
  146. }
  147. static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
  148. {
  149. struct etnaviv_cmdbuf *buf = &gpu->buffer;
  150. u32 size = buf->size;
  151. u32 *ptr = buf->vaddr;
  152. u32 i;
  153. seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
  154. buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
  155. size - buf->user_size);
  156. for (i = 0; i < size / 4; i++) {
  157. if (i && !(i % 4))
  158. seq_puts(m, "\n");
  159. if (i % 4 == 0)
  160. seq_printf(m, "\t0x%p: ", ptr + i);
  161. seq_printf(m, "%08x ", *(ptr + i));
  162. }
  163. seq_puts(m, "\n");
  164. }
/* debugfs "ring": dump one GPU's ring buffer under the GPU lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
  173. static int show_unlocked(struct seq_file *m, void *arg)
  174. {
  175. struct drm_info_node *node = (struct drm_info_node *) m->private;
  176. struct drm_device *dev = node->minor->dev;
  177. int (*show)(struct drm_device *dev, struct seq_file *m) =
  178. node->info_ent->data;
  179. return show(dev, m);
  180. }
  181. static int show_each_gpu(struct seq_file *m, void *arg)
  182. {
  183. struct drm_info_node *node = (struct drm_info_node *) m->private;
  184. struct drm_device *dev = node->minor->dev;
  185. struct etnaviv_drm_private *priv = dev->dev_private;
  186. struct etnaviv_gpu *gpu;
  187. int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
  188. node->info_ent->data;
  189. unsigned int i;
  190. int ret = 0;
  191. for (i = 0; i < ETNA_MAX_PIPES; i++) {
  192. gpu = priv->gpu[i];
  193. if (!gpu)
  194. continue;
  195. ret = show(gpu, m);
  196. if (ret < 0)
  197. break;
  198. }
  199. return ret;
  200. }
  201. static struct drm_info_list etnaviv_debugfs_list[] = {
  202. {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
  203. {"gem", show_unlocked, 0, etnaviv_gem_show},
  204. { "mm", show_unlocked, 0, etnaviv_mm_show },
  205. {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
  206. {"ring", show_each_gpu, 0, etnaviv_ring_show},
  207. };
  208. static int etnaviv_debugfs_init(struct drm_minor *minor)
  209. {
  210. struct drm_device *dev = minor->dev;
  211. int ret;
  212. ret = drm_debugfs_create_files(etnaviv_debugfs_list,
  213. ARRAY_SIZE(etnaviv_debugfs_list),
  214. minor->debugfs_root, minor);
  215. if (ret) {
  216. dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
  217. return ret;
  218. }
  219. return ret;
  220. }
  221. #endif
  222. /*
  223. * DRM ioctls:
  224. */
/*
 * ETNAVIV_GET_PARAM: query a GPU parameter on the selected pipe.
 * Returns -EINVAL for an out-of-range pipe, -ENXIO when no GPU is
 * present on that pipe.
 */
static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}
  238. static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
  239. struct drm_file *file)
  240. {
  241. struct drm_etnaviv_gem_new *args = data;
  242. if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
  243. ETNA_BO_FORCE_MMU))
  244. return -EINVAL;
  245. return etnaviv_gem_new_handle(dev, file, args->size,
  246. args->flags, &args->handle);
  247. }
/*
 * Build a struct timespec compound literal from the tv_sec/tv_nsec
 * pair embedded in an ioctl argument, so its address can be passed to
 * the timeout helpers.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
/*
 * ETNAVIV_GEM_CPU_PREP: prepare a BO for CPU access (waits for GPU
 * use to finish unless NOSYNC is given).  The lookup reference is
 * dropped before returning.
 */
static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_put_unlocked(obj);

	return ret;
}
  267. static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
  268. struct drm_file *file)
  269. {
  270. struct drm_etnaviv_gem_cpu_fini *args = data;
  271. struct drm_gem_object *obj;
  272. int ret;
  273. if (args->flags)
  274. return -EINVAL;
  275. obj = drm_gem_object_lookup(file, args->handle);
  276. if (!obj)
  277. return -ENOENT;
  278. ret = etnaviv_gem_cpu_fini(obj);
  279. drm_gem_object_put_unlocked(obj);
  280. return ret;
  281. }
  282. static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
  283. struct drm_file *file)
  284. {
  285. struct drm_etnaviv_gem_info *args = data;
  286. struct drm_gem_object *obj;
  287. int ret;
  288. if (args->pad)
  289. return -EINVAL;
  290. obj = drm_gem_object_lookup(file, args->handle);
  291. if (!obj)
  292. return -ENOENT;
  293. ret = etnaviv_gem_mmap_offset(obj, &args->offset);
  294. drm_gem_object_put_unlocked(obj);
  295. return ret;
  296. }
  297. static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
  298. struct drm_file *file)
  299. {
  300. struct drm_etnaviv_wait_fence *args = data;
  301. struct etnaviv_drm_private *priv = dev->dev_private;
  302. struct timespec *timeout = &TS(args->timeout);
  303. struct etnaviv_gpu *gpu;
  304. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  305. return -EINVAL;
  306. if (args->pipe >= ETNA_MAX_PIPES)
  307. return -EINVAL;
  308. gpu = priv->gpu[args->pipe];
  309. if (!gpu)
  310. return -ENXIO;
  311. if (args->flags & ETNA_WAIT_NONBLOCK)
  312. timeout = NULL;
  313. return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
  314. timeout);
  315. }
/*
 * ETNAVIV_GEM_USERPTR: wrap a page-aligned range of user memory in a
 * GEM object for GPU access.
 */
static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;
	int access;

	/* at least one of READ/WRITE must be set, and nothing else */
	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	/*
	 * Pointer and size must be page aligned and must not be
	 * truncated by the narrower in-kernel types (uintptr_t for the
	 * pointer, u32 for the size).
	 */
	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	/* verify the user range with the direction the GPU will use */
	if (args->flags & ETNA_USERPTR_WRITE)
		access = VERIFY_WRITE;
	else
		access = VERIFY_READ;

	if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}
  340. static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
  341. struct drm_file *file)
  342. {
  343. struct etnaviv_drm_private *priv = dev->dev_private;
  344. struct drm_etnaviv_gem_wait *args = data;
  345. struct timespec *timeout = &TS(args->timeout);
  346. struct drm_gem_object *obj;
  347. struct etnaviv_gpu *gpu;
  348. int ret;
  349. if (args->flags & ~(ETNA_WAIT_NONBLOCK))
  350. return -EINVAL;
  351. if (args->pipe >= ETNA_MAX_PIPES)
  352. return -EINVAL;
  353. gpu = priv->gpu[args->pipe];
  354. if (!gpu)
  355. return -ENXIO;
  356. obj = drm_gem_object_lookup(file, args->handle);
  357. if (!obj)
  358. return -ENOENT;
  359. if (args->flags & ETNA_WAIT_NONBLOCK)
  360. timeout = NULL;
  361. ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
  362. drm_gem_object_put_unlocked(obj);
  363. return ret;
  364. }
/*
 * ETNAVIV_PM_QUERY_DOM: enumerate a performance-monitor domain on the
 * selected pipe.
 */
static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}
/*
 * ETNAVIV_PM_QUERY_SIG: enumerate a performance-monitor signal on the
 * selected pipe.
 */
static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}
/*
 * Ioctl table.  Every entry requires DRM_AUTH and is available on
 * render nodes.
 */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};
/* VM ops for GEM mmaps: driver fault handler, generic open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
/* File ops: standard DRM helpers, except the GEM-aware mmap. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
/*
 * DRM driver description: GEM object management, PRIME buffer sharing
 * and render-node support; no modesetting (etnaviv is render-only).
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features = DRIVER_GEM |
			   DRIVER_PRIME |
			   DRIVER_RENDER,
	.open = etnaviv_open,
	.postclose = etnaviv_postclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops = &vm_ops,
	/* PRIME import/export uses the generic helpers ... */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	/* ... backed by driver callbacks for pin/map/mmap */
	.gem_prime_res_obj = etnaviv_gem_prime_res_obj,
	.gem_prime_pin = etnaviv_gem_prime_pin,
	.gem_prime_unpin = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap = etnaviv_gem_prime_vunmap,
	.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = etnaviv_debugfs_init,
#endif
	.ioctls = etnaviv_ioctls,
	.num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
	.fops = &fops,
	.name = "etnaviv",
	.desc = "etnaviv DRM",
	.date = "20151214",
	.major = 1,
	.minor = 2,
};
  454. /*
  455. * Platform driver:
  456. */
/*
 * Component-master bind: allocate the DRM device and driver private
 * data, bind all GPU components, initialize the GPUs and register the
 * device with the DRM core.  Error paths unwind in reverse order.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	/*
	 * NOTE(review): drvdata is set before component_bind_all() —
	 * presumably the component bind callbacks read it; confirm
	 * against etnaviv_gpu_bind before reordering.
	 */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	/* initialize the GPU cores that just bound */
	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
/*
 * Component-master unbind: mirror of etnaviv_bind() — unregister from
 * DRM core, unbind the components, then free the private data and drop
 * the device reference.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}
/* Component-master callbacks invoked by the component framework. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
  506. static int compare_of(struct device *dev, void *data)
  507. {
  508. struct device_node *np = data;
  509. return dev->of_node == np;
  510. }
  511. static int compare_str(struct device *dev, void *data)
  512. {
  513. return !strcmp(dev_name(dev), data);
  514. }
  515. static int etnaviv_pdev_probe(struct platform_device *pdev)
  516. {
  517. struct device *dev = &pdev->dev;
  518. struct component_match *match = NULL;
  519. dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
  520. if (!dev->platform_data) {
  521. struct device_node *core_node;
  522. for_each_compatible_node(core_node, NULL, "vivante,gc") {
  523. if (!of_device_is_available(core_node))
  524. continue;
  525. drm_of_component_match_add(&pdev->dev, &match,
  526. compare_of, core_node);
  527. }
  528. } else {
  529. char **names = dev->platform_data;
  530. unsigned i;
  531. for (i = 0; names[i]; i++)
  532. component_match_add(dev, &match, compare_str, names[i]);
  533. }
  534. return component_master_add_with_match(dev, &etnaviv_master_ops, match);
  535. }
/* Remove the master platform device: drop the component master. */
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}
/* Platform driver for the virtual "etnaviv" master device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe = etnaviv_pdev_probe,
	.remove = etnaviv_pdev_remove,
	.driver = {
		.name = "etnaviv",
	},
};
  548. static int __init etnaviv_init(void)
  549. {
  550. int ret;
  551. struct device_node *np;
  552. etnaviv_validate_init();
  553. ret = platform_driver_register(&etnaviv_gpu_driver);
  554. if (ret != 0)
  555. return ret;
  556. ret = platform_driver_register(&etnaviv_platform_driver);
  557. if (ret != 0)
  558. platform_driver_unregister(&etnaviv_gpu_driver);
  559. /*
  560. * If the DT contains at least one available GPU device, instantiate
  561. * the DRM platform device.
  562. */
  563. for_each_compatible_node(np, NULL, "vivante,gc") {
  564. if (!of_device_is_available(np))
  565. continue;
  566. platform_device_register_simple("etnaviv", -1, NULL, 0);
  567. of_node_put(np);
  568. break;
  569. }
  570. return ret;
  571. }
  572. module_init(etnaviv_init);
  573. static void __exit etnaviv_exit(void)
  574. {
  575. platform_driver_unregister(&etnaviv_gpu_driver);
  576. platform_driver_unregister(&etnaviv_platform_driver);
  577. }
  578. module_exit(etnaviv_exit);
/* Module metadata. */
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");