drm.c

/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};
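
/*
 * Hand a swapped-in atomic state off to the commit worker so that the
 * hardware programming completes asynchronously to the caller.
 */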
static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	err = drm_atomic_helper_swap_state(state, true);
	if (err) {
		mutex_unlock(&tegra->commit.lock);
		drm_atomic_helper_cleanup_planes(drm, state);
		return err;
	}

	drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};
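
/*
 * One-time driver setup: split the IOMMU aperture (if present) into a GEM
 * region managed by drm_mm and a small carveout used for kernel-internal
 * allocations, then bring up the mode-setting core, fbdev emulation,
 * host1x clients and vblank handling.
 */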
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto device;

	return 0;

device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}
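
/* Tear down the resources set up by tegra_drm_load(). */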
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}
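
/*
 * Resolve a GEM handle for @file into the host1x buffer object embedded in
 * the Tegra buffer object, or return NULL if the handle is invalid.
 */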
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}
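
/*
 * Copy a single relocation descriptor from userspace and resolve its command
 * buffer and target handles into buffer objects.
 */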
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
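
/*
 * Copy a single wait check descriptor from userspace and resolve its buffer
 * handle into a buffer object.
 */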
static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 cmdbuf;
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}
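
/*
 * Validate a userspace job description, pin all referenced buffer objects and
 * submit the resulting job to the context's host1x channel. The syncpoint
 * fence that signals completion is returned in @args->fence.
 */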
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2 + num_waitchks;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the word count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned;
		 * an unaligned offset is malformed and would corrupt the
		 * command stream when the buffer address is relocated.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * while patching the relocations, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(wait,
						    &waitchks[num_waitchks],
						    file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned offset would cause an unaligned write while
		 * patching the waitchks, corrupting the command stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}
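
/*
 * Open a channel on @client and track the resulting context in the per-file
 * IDR so that later IOCTLs can refer to it by ID.
 */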
static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];

	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};
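
/* idr_for_each() callback used to release all contexts still open on a file */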
static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
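
/*
 * Allocate kernel-internal memory for a client. With an IOMMU, the pages are
 * mapped at an IO virtual address taken from the carveout range; without one,
 * the physical address of the (32-bit addressable) pages is returned instead.
 */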
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
		      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}
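
/* Release memory previously obtained from tegra_drm_alloc(). */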
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
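/*
 * System sleep support: stash the current atomic state across suspend and
 * restore it on resume.
 */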
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");