/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
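
/*
 * The CDMA gather fetch limit presumably reflects the width of the word
 * count field in the host1x GATHER opcode (16383 == (1 << 14) - 1);
 * tegra_drm_submit() rejects command buffers that claim more words than a
 * single gather can encode.
 */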

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static void tegra_atomic_schedule(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	tegra->commit.state = state;
	schedule_work(&tegra->commit.work);
}

static void tegra_atomic_complete(struct tegra_drm *tegra,
				  struct drm_atomic_state *state)
{
	struct drm_device *drm = tegra->drm;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_helper_commit_modeset_disables(drm, state);
	drm_atomic_helper_commit_modeset_enables(drm, state);
	drm_atomic_helper_commit_planes(drm, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_wait_for_vblanks(drm, state);

	drm_atomic_helper_cleanup_planes(drm, state);
	drm_atomic_state_put(state);
}

static void tegra_atomic_work(struct work_struct *work)
{
	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
					       commit.work);

	tegra_atomic_complete(tegra, tegra->commit.state);
}

static int tegra_atomic_commit(struct drm_device *drm,
			       struct drm_atomic_state *state, bool nonblock)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	/* serialize outstanding nonblocking commits */
	mutex_lock(&tegra->commit.lock);
	flush_work(&tegra->commit.work);

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	err = drm_atomic_helper_swap_state(state, true);
	if (err) {
		mutex_unlock(&tegra->commit.lock);
		drm_atomic_helper_cleanup_planes(drm, state);
		return err;
	}

	drm_atomic_state_get(state);

	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);

	mutex_unlock(&tegra->commit.lock);
	return 0;
}

static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = tegra_fb_output_poll_changed,
#endif
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}
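
	/*
	 * Worked example with illustrative values: for an SMMU aperture of
	 * 0x00000000-0xffffffff and CARVEOUT_SZ = SZ_64M, GEM allocations are
	 * placed in 0x00000000-0xfbffffff while the carveout occupies
	 * 0xfc000000-0xffffffff. With a smallest IOMMU page size of 4 KiB,
	 * order is 12, so the carveout IOVA domain is managed in 4 KiB
	 * granules and carveout.limit is the highest carveout page frame
	 * number passed to the IOVA allocator.
	 */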

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	mutex_init(&tegra->commit.lock);
	INIT_WORK(&tegra->commit.work, tegra_atomic_work);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.allow_fb_modifiers = true;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto device;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto device;

	return 0;

device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}
free:
	kfree(tegra);
	return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		iommu_domain_free(tegra->domain);
		drm_mm_takedown(&tegra->mm);
		mutex_destroy(&tegra->mm_lock);
		put_iova_domain(&tegra->carveout.domain);
	}

	kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init(&fpriv->contexts);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static void tegra_drm_lastclose(struct drm_device *drm)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct tegra_drm *tegra = drm->dev_private;

	tegra_fbdev_restore_mode(tegra->fbdev);
#endif
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	drm_gem_object_unreference_unlocked(gem);

	bo = to_tegra_bo(gem);
	return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

static int host1x_waitchk_copy_from_user(struct host1x_waitchk *dest,
					 struct drm_tegra_waitchk __user *src,
					 struct drm_file *file)
{
	u32 cmdbuf;
	int err;

	err = get_user(cmdbuf, &src->handle);
	if (err < 0)
		return err;

	err = get_user(dest->offset, &src->offset);
	if (err < 0)
		return err;

	err = get_user(dest->syncpt_id, &src->syncpt);
	if (err < 0)
		return err;

	err = get_user(dest->thresh, &src->thresh);
	if (err < 0)
		return err;

	dest->bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->bo)
		return -ENOENT;

	return 0;
}

int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	unsigned int num_waitchks = args->num_waitchks;
	struct drm_tegra_cmdbuf __user *cmdbufs =
		(void __user *)(uintptr_t)args->cmdbufs;
	struct drm_tegra_reloc __user *relocs =
		(void __user *)(uintptr_t)args->relocs;
	struct drm_tegra_waitchk __user *waitchks =
		(void __user *)(uintptr_t)args->waitchks;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	int err;

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs, args->num_waitchks);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->client = (u32)args->context;
	job->class = context->client->base.class;
	job->serialize = true;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the word count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);

		/*
		 * The gather buffer base address must be 4-byte aligned; an
		 * unaligned offset is malformed and causes command stream
		 * corruption when the buffer addresses are relocated.
		 */
		if (offset & 3 || offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
						  &relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocarray[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);

		/*
		 * An unaligned cmdbuf offset would cause an unaligned write
		 * while patching the relocations, corrupting the command
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* copy and resolve waitchks from submit */
	while (num_waitchks--) {
		struct host1x_waitchk *wait = &job->waitchk[num_waitchks];
		struct tegra_bo *obj;

		err = host1x_waitchk_copy_from_user(wait,
						    &waitchks[num_waitchks],
						    file);
		if (err < 0)
			goto fail;

		obj = host1x_to_tegra_bo(wait->bo);

		/*
		 * An unaligned offset would cause an unaligned write while
		 * patching the waitchks, corrupting the command stream.
		 */
		if (wait->offset & 3 ||
		    wait->offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, (void __user *)(uintptr_t)args->syncpts,
			   sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err)
		goto fail_submit;

	args->fence = job->syncpt_end;

	host1x_job_put(job);
	return 0;

fail_submit:
	host1x_job_unpin(job);
fail:
	host1x_job_put(job);
	return err;
}
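
/*
 * Illustrative sketch (not part of the driver): a minimal userspace submit
 * that this function would accept, assuming libdrm's drmIoctl() and that fd,
 * ctx (from DRM_IOCTL_TEGRA_OPEN_CHANNEL), bo_handle, num_words and syncpt_id
 * were obtained elsewhere.
 *
 *	struct drm_tegra_syncpt syncpt = { .id = syncpt_id, .incrs = 1 };
 *	struct drm_tegra_cmdbuf cmdbuf = {
 *		.handle = bo_handle,	// gather BO, offset must be 4-byte aligned
 *		.offset = 0,
 *		.words = num_words,	// at most CDMA_GATHER_FETCHES_MAX_NB
 *	};
 *	struct drm_tegra_submit submit = {
 *		.context = ctx,
 *		.num_syncpts = 1,	// exactly one syncpt is required here
 *		.num_cmdbufs = 1,
 *		.syncpts = (uintptr_t)&syncpt,
 *		.cmdbufs = (uintptr_t)&cmdbuf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &submit) == 0)
 *		;	// submit.fence holds the syncpoint threshold to wait on
 */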

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh, args->timeout,
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];

	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_unreference_unlocked(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, 0),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, 0),
#endif
};
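
/*
 * Illustrative sketch (not part of the driver): how userspace might drive the
 * staging channel ioctls above, assuming libdrm's drmIoctl() and the gr2d
 * class value (HOST1X_CLASS_GR2D in the kernel headers).
 *
 *	struct drm_tegra_open_channel open_args = { .client = HOST1X_CLASS_GR2D };
 *	struct drm_tegra_get_syncpt syncpt_args;
 *	struct drm_tegra_close_channel close_args;
 *
 *	drmIoctl(fd, DRM_IOCTL_TEGRA_OPEN_CHANNEL, &open_args);
 *	syncpt_args.context = open_args.context;
 *	syncpt_args.index = 0;
 *	drmIoctl(fd, DRM_IOCTL_TEGRA_GET_SYNCPT, &syncpt_args);
 *	// ... build and submit jobs against open_args.context, fencing on syncpt_args.id ...
 *	close_args.context = open_args.context;
 *	drmIoctl(fd, DRM_IOCTL_TEGRA_CLOSE_CHANNEL, &close_args);
 */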

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	mutex_lock(&tegra->mm_lock);
	drm_mm_print(&tegra->mm, &p);
	mutex_unlock(&tegra->mm_lock);

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = tegra_drm_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,
	.dumb_map_offset = tegra_bo_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
		      dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;
	}

	*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
	err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
			size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto free_iova;

	return virt;

free_iova:
	__free_iova(&tegra->carveout.domain, alloc);
free_pages:
	free_pages((unsigned long)virt, get_order(size));

	return ERR_PTR(err);
}
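
/*
 * Illustrative sketch (not part of the driver): a host1x client could use the
 * carveout allocator above for a device-visible scratch buffer, for instance
 * when uploading engine firmware; the size and all names other than
 * tegra_drm_alloc() and tegra_drm_free() are assumptions.
 *
 *	dma_addr_t iova;
 *	size_t size = 0x40000;
 *	void *virt = tegra_drm_alloc(tegra, size, &iova);
 *
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 *
 *	// The CPU fills the buffer through virt; the engine reads it at iova,
 *	// which lies in the 64 MiB carveout at the top of the IOMMU aperture
 *	// (or is simply the physical address when no IOMMU is present).
 *	tegra_drm_free(tegra, size, virt, iova);
 */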

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
		    dma_addr_t dma)
{
	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	if (tegra->domain) {
		iommu_unmap(tegra->domain, dma, size);
		free_iova(&tegra->carveout.domain,
			  iova_pfn(&tegra->carveout.domain, dma));
	}

	free_pages((unsigned long)virt, get_order(size));
}

static int host1x_drm_probe(struct host1x_device *dev)
{
	struct drm_driver *driver = &tegra_drm_driver;
	struct drm_device *drm;
	int err;

	drm = drm_dev_alloc(driver, &dev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	dev_set_drvdata(&dev->dev, drm);

	err = drm_dev_register(drm, 0);
	if (err < 0)
		goto unref;

	return 0;

unref:
	drm_dev_unref(drm);
	return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_unref(drm);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_kms_helper_poll_disable(drm);
	tegra_drm_fb_suspend(drm);

	tegra->state = drm_atomic_helper_suspend(drm);
	if (IS_ERR(tegra->state)) {
		tegra_drm_fb_resume(drm);
		drm_kms_helper_poll_enable(drm);
		return PTR_ERR(tegra->state);
	}

	return 0;
}

static int host1x_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct tegra_drm *tegra = drm->dev_private;

	drm_atomic_helper_resume(drm, tegra->state);
	tegra_drm_fb_resume(drm);
	drm_kms_helper_poll_enable(drm);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
	int err;

	err = host1x_driver_register(&host1x_drm_driver);
	if (err < 0)
		return err;

	err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
	if (err < 0)
		goto unregister_host1x;

	return 0;

unregister_host1x:
	host1x_driver_unregister(&host1x_drm_driver);
	return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");