drm.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331
  1. /*
  2. * Copyright (C) 2012 Avionic Design GmbH
  3. * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. */
  9. #include <linux/bitops.h>
  10. #include <linux/host1x.h>
  11. #include <linux/idr.h>
  12. #include <linux/iommu.h>
  13. #include <drm/drm_atomic.h>
  14. #include <drm/drm_atomic_helper.h>
  15. #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
  16. #include <asm/dma-iommu.h>
  17. #endif
  18. #include "drm.h"
  19. #include "gem.h"
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

/* size of the IOVA region carved out of the top of the IOMMU aperture */
#define CARVEOUT_SZ SZ_64M
/* hardware limit on the number of words one CDMA gather may fetch */
#define CDMA_GATHER_FETCHES_MAX_NB 16383
/*
 * Per-open-file driver state: tracks the channel contexts opened
 * through this DRM file.
 */
struct tegra_drm_file {
	struct idr contexts;	/* context ID -> struct tegra_drm_context */
	struct mutex lock;	/* protects @contexts */
};
  32. static int tegra_atomic_check(struct drm_device *drm,
  33. struct drm_atomic_state *state)
  34. {
  35. int err;
  36. err = drm_atomic_helper_check(drm, state);
  37. if (err < 0)
  38. return err;
  39. return tegra_display_hub_atomic_check(drm, state);
  40. }
/* Mode-setting configuration callbacks for the Tegra DRM device. */
static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
/*
 * Custom atomic commit tail. When a display hub is present, the hub
 * state must be committed between disabling modesets and committing
 * planes, so the sequence is spelled out manually; otherwise the
 * runtime-PM-aware generic helper is sufficient.
 */
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		/* hub commit must happen before planes are committed */
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}
/* Helper vtable installing the hub-aware commit tail above. */
static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};
/*
 * Driver .load callback: allocate the per-device tegra_drm state, set up
 * the (optional) IOMMU GEM/carveout apertures, initialize KMS mode
 * config, fbdev emulation, the host1x sub-devices, the display hub and
 * vblank support.
 *
 * On failure, resources are unwound in reverse order through the chain
 * of labels at the bottom. Returns 0 on success or a negative errno.
 */
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;

		/*
		 * Split the IOMMU aperture: the bottom part is managed by
		 * drm_mm for GEM buffers, the top CARVEOUT_SZ bytes form an
		 * IOVA carveout.
		 */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		/* granule = smallest page size supported by the domain */
		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}

	mutex_init(&tegra->clients_lock);
	INIT_LIST_HEAD(&tegra->clients);

	drm->dev_private = tegra;
	tegra->drm = drm;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;
	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;
	drm->mode_config.allow_fb_modifiers = true;
	drm->mode_config.normalize_zpos = true;
	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);

	/* brings up the host1x sub-devices; may populate tegra->hub */
	err = host1x_device_init(device);
	if (err < 0)
		goto fbdev;

	if (tegra->hub) {
		err = tegra_display_hub_prepare(tegra->hub);
		if (err < 0)
			goto device;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM
	 * core, so we need to set this manually in order to allow the
	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
	 */
	drm->irq_enabled = true;

	/* syncpoints are used for full 32-bit hardware VBLANK counters */
	drm->max_vblank_count = 0xffffffff;

	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		goto hub;

	drm_mode_config_reset(drm);

	err = tegra_drm_fb_init(drm);
	if (err < 0)
		goto hub;

	return 0;

	/* error unwind: each label undoes the steps completed before it */
hub:
	if (tegra->hub)
		tegra_display_hub_cleanup(tegra->hub);
device:
	host1x_device_exit(device);
fbdev:
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_free(drm);
config:
	drm_mode_config_cleanup(drm);

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
	}
domain:
	if (tegra->domain)
		iommu_domain_free(tegra->domain);
free:
	kfree(tegra);
	return err;
}
/*
 * Driver .unload callback: tear down KMS, fbdev, the host1x devices and
 * the IOMMU apertures set up in tegra_drm_load(), then free the
 * per-device state.
 *
 * NOTE(review): if host1x_device_exit() fails, this returns early and
 * skips the domain teardown and kfree(tegra) — looks like a deliberate
 * "device still busy" bail-out, but it does leak @tegra; confirm
 * against the host1x bus semantics.
 */
static void tegra_drm_unload(struct drm_device *drm)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);
	drm_atomic_helper_shutdown(drm);
	drm_mode_config_cleanup(drm);

	err = host1x_device_exit(device);
	if (err < 0)
		return;

	if (tegra->domain) {
		mutex_destroy(&tegra->mm_lock);
		drm_mm_takedown(&tegra->mm);
		put_iova_domain(&tegra->carveout.domain);
		iova_cache_put();
		iommu_domain_free(tegra->domain);
	}

	kfree(tegra);
}
  191. static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
  192. {
  193. struct tegra_drm_file *fpriv;
  194. fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
  195. if (!fpriv)
  196. return -ENOMEM;
  197. idr_init(&fpriv->contexts);
  198. mutex_init(&fpriv->lock);
  199. filp->driver_priv = fpriv;
  200. return 0;
  201. }
/* Close the context's channel via its client and free the context. */
static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}
  207. static struct host1x_bo *
  208. host1x_bo_lookup(struct drm_file *file, u32 handle)
  209. {
  210. struct drm_gem_object *gem;
  211. struct tegra_bo *bo;
  212. gem = drm_gem_object_lookup(file, handle);
  213. if (!gem)
  214. return NULL;
  215. bo = to_tegra_bo(gem);
  216. return &bo->base;
  217. }
/*
 * Copy one relocation descriptor from userspace into @dest and resolve
 * its cmdbuf/target GEM handles to host1x BOs.
 *
 * Returns 0 on success, a negative errno from get_user() on fault, or
 * -ENOENT when a handle does not resolve.
 *
 * NOTE(review): if the target lookup fails after the cmdbuf lookup
 * succeeded, the reference taken on dest->cmdbuf.bo is neither dropped
 * here nor reported to the caller's refs[] tracking in
 * tegra_drm_submit() — looks like a BO reference leak on that error
 * path; confirm against upstream fixes.
 */
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}
/*
 * Build and submit a host1x job from a userspace DRM_TEGRA_SUBMIT
 * request: validate and gather the command buffers, copy and validate
 * the relocations, resolve the syncpoint, then pin and submit the job.
 *
 * Every BO looked up is recorded in refs[] so its GEM reference can be
 * dropped once the submission (or the error path) is done — the job
 * itself keeps the buffers alive via host1x_job_pin().
 *
 * Returns 0 on success (with args->fence set to the syncpoint fence
 * value) or a negative errno.
 */
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support other than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;
	job->serialize = true;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	/* one ref per cmdbuf, two per reloc (cmdbuf + target) */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	/* copy, validate and add each command buffer as a gather */
	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383, a higher
		 * value means the words count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		/* 64-bit math so offset + length cannot wrap around */
		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * Gather buffer base address must be 4-bytes aligned,
		 * unaligned offset is malformed and cause commands stream
		 * corruption on the buffer address relocation.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The unaligned cmdbuf offset will cause an unaligned write
		 * during of the relocations patching, corrupting the commands
		 * stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	/* cap the job timeout at 10 seconds */
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	/* drop the GEM references taken during lookup, success or not */
	while (num_refs--)
		drm_gem_object_put_unlocked(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}
  395. #ifdef CONFIG_DRM_TEGRA_STAGING
  396. static int tegra_gem_create(struct drm_device *drm, void *data,
  397. struct drm_file *file)
  398. {
  399. struct drm_tegra_gem_create *args = data;
  400. struct tegra_bo *bo;
  401. bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
  402. &args->handle);
  403. if (IS_ERR(bo))
  404. return PTR_ERR(bo);
  405. return 0;
  406. }
  407. static int tegra_gem_mmap(struct drm_device *drm, void *data,
  408. struct drm_file *file)
  409. {
  410. struct drm_tegra_gem_mmap *args = data;
  411. struct drm_gem_object *gem;
  412. struct tegra_bo *bo;
  413. gem = drm_gem_object_lookup(file, args->handle);
  414. if (!gem)
  415. return -EINVAL;
  416. bo = to_tegra_bo(gem);
  417. args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
  418. drm_gem_object_put_unlocked(gem);
  419. return 0;
  420. }
  421. static int tegra_syncpt_read(struct drm_device *drm, void *data,
  422. struct drm_file *file)
  423. {
  424. struct host1x *host = dev_get_drvdata(drm->dev->parent);
  425. struct drm_tegra_syncpt_read *args = data;
  426. struct host1x_syncpt *sp;
  427. sp = host1x_syncpt_get(host, args->id);
  428. if (!sp)
  429. return -EINVAL;
  430. args->value = host1x_syncpt_read_min(sp);
  431. return 0;
  432. }
  433. static int tegra_syncpt_incr(struct drm_device *drm, void *data,
  434. struct drm_file *file)
  435. {
  436. struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
  437. struct drm_tegra_syncpt_incr *args = data;
  438. struct host1x_syncpt *sp;
  439. sp = host1x_syncpt_get(host1x, args->id);
  440. if (!sp)
  441. return -EINVAL;
  442. return host1x_syncpt_incr(sp);
  443. }
  444. static int tegra_syncpt_wait(struct drm_device *drm, void *data,
  445. struct drm_file *file)
  446. {
  447. struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
  448. struct drm_tegra_syncpt_wait *args = data;
  449. struct host1x_syncpt *sp;
  450. sp = host1x_syncpt_get(host1x, args->id);
  451. if (!sp)
  452. return -EINVAL;
  453. return host1x_syncpt_wait(sp, args->thresh,
  454. msecs_to_jiffies(args->timeout),
  455. &args->value);
  456. }
  457. static int tegra_client_open(struct tegra_drm_file *fpriv,
  458. struct tegra_drm_client *client,
  459. struct tegra_drm_context *context)
  460. {
  461. int err;
  462. err = client->ops->open_channel(client, context);
  463. if (err < 0)
  464. return err;
  465. err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
  466. if (err < 0) {
  467. client->ops->close_channel(context);
  468. return err;
  469. }
  470. context->client = client;
  471. context->id = err;
  472. return 0;
  473. }
/*
 * DRM_IOCTL_TEGRA_OPEN_CHANNEL: find the client whose class matches the
 * request and open a channel context on it. The context is allocated up
 * front and freed again if no client matches (err stays -ENODEV) or if
 * tegra_client_open() fails.
 */
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}
  500. static int tegra_close_channel(struct drm_device *drm, void *data,
  501. struct drm_file *file)
  502. {
  503. struct tegra_drm_file *fpriv = file->driver_priv;
  504. struct drm_tegra_close_channel *args = data;
  505. struct tegra_drm_context *context;
  506. int err = 0;
  507. mutex_lock(&fpriv->lock);
  508. context = idr_find(&fpriv->contexts, args->context);
  509. if (!context) {
  510. err = -EINVAL;
  511. goto unlock;
  512. }
  513. idr_remove(&fpriv->contexts, context->id);
  514. tegra_drm_context_free(context);
  515. unlock:
  516. mutex_unlock(&fpriv->lock);
  517. return err;
  518. }
  519. static int tegra_get_syncpt(struct drm_device *drm, void *data,
  520. struct drm_file *file)
  521. {
  522. struct tegra_drm_file *fpriv = file->driver_priv;
  523. struct drm_tegra_get_syncpt *args = data;
  524. struct tegra_drm_context *context;
  525. struct host1x_syncpt *syncpt;
  526. int err = 0;
  527. mutex_lock(&fpriv->lock);
  528. context = idr_find(&fpriv->contexts, args->context);
  529. if (!context) {
  530. err = -ENODEV;
  531. goto unlock;
  532. }
  533. if (args->index >= context->client->base.num_syncpts) {
  534. err = -EINVAL;
  535. goto unlock;
  536. }
  537. syncpt = context->client->base.syncpts[args->index];
  538. args->id = host1x_syncpt_id(syncpt);
  539. unlock:
  540. mutex_unlock(&fpriv->lock);
  541. return err;
  542. }
  543. static int tegra_submit(struct drm_device *drm, void *data,
  544. struct drm_file *file)
  545. {
  546. struct tegra_drm_file *fpriv = file->driver_priv;
  547. struct drm_tegra_submit *args = data;
  548. struct tegra_drm_context *context;
  549. int err;
  550. mutex_lock(&fpriv->lock);
  551. context = idr_find(&fpriv->contexts, args->context);
  552. if (!context) {
  553. err = -ENODEV;
  554. goto unlock;
  555. }
  556. err = context->client->ops->submit(context, args, drm, file);
  557. unlock:
  558. mutex_unlock(&fpriv->lock);
  559. return err;
  560. }
  561. static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
  562. struct drm_file *file)
  563. {
  564. struct tegra_drm_file *fpriv = file->driver_priv;
  565. struct drm_tegra_get_syncpt_base *args = data;
  566. struct tegra_drm_context *context;
  567. struct host1x_syncpt_base *base;
  568. struct host1x_syncpt *syncpt;
  569. int err = 0;
  570. mutex_lock(&fpriv->lock);
  571. context = idr_find(&fpriv->contexts, args->context);
  572. if (!context) {
  573. err = -ENODEV;
  574. goto unlock;
  575. }
  576. if (args->syncpt >= context->client->base.num_syncpts) {
  577. err = -EINVAL;
  578. goto unlock;
  579. }
  580. syncpt = context->client->base.syncpts[args->syncpt];
  581. base = host1x_syncpt_get_base(syncpt);
  582. if (!base) {
  583. err = -ENXIO;
  584. goto unlock;
  585. }
  586. args->id = host1x_syncpt_base_id(base);
  587. unlock:
  588. mutex_unlock(&fpriv->lock);
  589. return err;
  590. }
  591. static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
  592. struct drm_file *file)
  593. {
  594. struct drm_tegra_gem_set_tiling *args = data;
  595. enum tegra_bo_tiling_mode mode;
  596. struct drm_gem_object *gem;
  597. unsigned long value = 0;
  598. struct tegra_bo *bo;
  599. switch (args->mode) {
  600. case DRM_TEGRA_GEM_TILING_MODE_PITCH:
  601. mode = TEGRA_BO_TILING_MODE_PITCH;
  602. if (args->value != 0)
  603. return -EINVAL;
  604. break;
  605. case DRM_TEGRA_GEM_TILING_MODE_TILED:
  606. mode = TEGRA_BO_TILING_MODE_TILED;
  607. if (args->value != 0)
  608. return -EINVAL;
  609. break;
  610. case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
  611. mode = TEGRA_BO_TILING_MODE_BLOCK;
  612. if (args->value > 5)
  613. return -EINVAL;
  614. value = args->value;
  615. break;
  616. default:
  617. return -EINVAL;
  618. }
  619. gem = drm_gem_object_lookup(file, args->handle);
  620. if (!gem)
  621. return -ENOENT;
  622. bo = to_tegra_bo(gem);
  623. bo->tiling.mode = mode;
  624. bo->tiling.value = value;
  625. drm_gem_object_put_unlocked(gem);
  626. return 0;
  627. }
  628. static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
  629. struct drm_file *file)
  630. {
  631. struct drm_tegra_gem_get_tiling *args = data;
  632. struct drm_gem_object *gem;
  633. struct tegra_bo *bo;
  634. int err = 0;
  635. gem = drm_gem_object_lookup(file, args->handle);
  636. if (!gem)
  637. return -ENOENT;
  638. bo = to_tegra_bo(gem);
  639. switch (bo->tiling.mode) {
  640. case TEGRA_BO_TILING_MODE_PITCH:
  641. args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
  642. args->value = 0;
  643. break;
  644. case TEGRA_BO_TILING_MODE_TILED:
  645. args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
  646. args->value = 0;
  647. break;
  648. case TEGRA_BO_TILING_MODE_BLOCK:
  649. args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
  650. args->value = bo->tiling.value;
  651. break;
  652. default:
  653. err = -EINVAL;
  654. break;
  655. }
  656. drm_gem_object_put_unlocked(gem);
  657. return err;
  658. }
  659. static int tegra_gem_set_flags(struct drm_device *drm, void *data,
  660. struct drm_file *file)
  661. {
  662. struct drm_tegra_gem_set_flags *args = data;
  663. struct drm_gem_object *gem;
  664. struct tegra_bo *bo;
  665. if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
  666. return -EINVAL;
  667. gem = drm_gem_object_lookup(file, args->handle);
  668. if (!gem)
  669. return -ENOENT;
  670. bo = to_tegra_bo(gem);
  671. bo->flags = 0;
  672. if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
  673. bo->flags |= TEGRA_BO_BOTTOM_UP;
  674. drm_gem_object_put_unlocked(gem);
  675. return 0;
  676. }
  677. static int tegra_gem_get_flags(struct drm_device *drm, void *data,
  678. struct drm_file *file)
  679. {
  680. struct drm_tegra_gem_get_flags *args = data;
  681. struct drm_gem_object *gem;
  682. struct tegra_bo *bo;
  683. gem = drm_gem_object_lookup(file, args->handle);
  684. if (!gem)
  685. return -ENOENT;
  686. bo = to_tegra_bo(gem);
  687. args->flags = 0;
  688. if (bo->flags & TEGRA_BO_BOTTOM_UP)
  689. args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
  690. drm_gem_object_put_unlocked(gem);
  691. return 0;
  692. }
  693. #endif
/*
 * Driver-private ioctl table. All entries are render-node capable and
 * only compiled in when the staging UAPI is enabled.
 */
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};
/* File operations: standard DRM entry points plus the Tegra mmap hook. */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};
  737. static int tegra_drm_context_cleanup(int id, void *p, void *data)
  738. {
  739. struct tegra_drm_context *context = p;
  740. tegra_drm_context_free(context);
  741. return 0;
  742. }
/*
 * Per-file close hook: free every context the file still holds, then
 * destroy the IDR/lock and the per-file state itself.
 */
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}
  753. #ifdef CONFIG_DEBUG_FS
/* debugfs "framebuffers": dump every framebuffer known to the device. */
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}
  770. static int tegra_debugfs_iova(struct seq_file *s, void *data)
  771. {
  772. struct drm_info_node *node = (struct drm_info_node *)s->private;
  773. struct drm_device *drm = node->minor->dev;
  774. struct tegra_drm *tegra = drm->dev_private;
  775. struct drm_printer p = drm_seq_file_printer(s);
  776. if (tegra->domain) {
  777. mutex_lock(&tegra->mm_lock);
  778. drm_mm_print(&tegra->mm, &p);
  779. mutex_unlock(&tegra->mm_lock);
  780. }
  781. return 0;
  782. }
/* debugfs entries registered per DRM minor. */
static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};
/* Register the debugfs files above under the minor's debugfs root. */
static int tegra_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(tegra_debugfs_list,
					ARRAY_SIZE(tegra_debugfs_list),
					minor->debugfs_root, minor);
}
  793. #endif
/* Top-level DRM driver description for the Tegra device. */
static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.gem_free_object_unlocked = tegra_bo_free_object,
	.gem_vm_ops = &tegra_bo_vm_ops,

	/* PRIME buffer sharing */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = tegra_gem_prime_export,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
/*
 * Append @client to the device's client list under clients_lock.
 * Always returns 0.
 */
int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
/*
 * Remove @client from the device's client list under clients_lock.
 * list_del_init() leaves client->list in a valid (empty) state so a
 * repeated removal is harmless. Always returns 0.
 */
int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&tegra->clients_lock);

	return 0;
}
  838. struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
  839. bool shared)
  840. {
  841. struct drm_device *drm = dev_get_drvdata(client->parent);
  842. struct tegra_drm *tegra = drm->dev_private;
  843. struct iommu_group *group = NULL;
  844. int err;
  845. if (tegra->domain) {
  846. group = iommu_group_get(client->dev);
  847. if (!group) {
  848. dev_err(client->dev, "failed to get IOMMU group\n");
  849. return ERR_PTR(-ENODEV);
  850. }
  851. if (!shared || (shared && (group != tegra->group))) {
  852. #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
  853. if (client->dev->archdata.mapping) {
  854. struct dma_iommu_mapping *mapping =
  855. to_dma_iommu_mapping(client->dev);
  856. arm_iommu_detach_device(client->dev);
  857. arm_iommu_release_mapping(mapping);
  858. }
  859. #endif
  860. err = iommu_attach_group(tegra->domain, group);
  861. if (err < 0) {
  862. iommu_group_put(group);
  863. return ERR_PTR(err);
  864. }
  865. if (shared && !tegra->group)
  866. tegra->group = group;
  867. }
  868. }
  869. return group;
  870. }
  871. void host1x_client_iommu_detach(struct host1x_client *client,
  872. struct iommu_group *group)
  873. {
  874. struct drm_device *drm = dev_get_drvdata(client->parent);
  875. struct tegra_drm *tegra = drm->dev_private;
  876. if (group) {
  877. if (group == tegra->group) {
  878. iommu_detach_group(tegra->domain, group);
  879. tegra->group = NULL;
  880. }
  881. iommu_group_put(group);
  882. }
  883. }
  884. void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
  885. {
  886. struct iova *alloc;
  887. void *virt;
  888. gfp_t gfp;
  889. int err;
  890. if (tegra->domain)
  891. size = iova_align(&tegra->carveout.domain, size);
  892. else
  893. size = PAGE_ALIGN(size);
  894. gfp = GFP_KERNEL | __GFP_ZERO;
  895. if (!tegra->domain) {
  896. /*
  897. * Many units only support 32-bit addresses, even on 64-bit
  898. * SoCs. If there is no IOMMU to translate into a 32-bit IO
  899. * virtual address space, force allocations to be in the
  900. * lower 32-bit range.
  901. */
  902. gfp |= GFP_DMA;
  903. }
  904. virt = (void *)__get_free_pages(gfp, get_order(size));
  905. if (!virt)
  906. return ERR_PTR(-ENOMEM);
  907. if (!tegra->domain) {
  908. /*
  909. * If IOMMU is disabled, devices address physical memory
  910. * directly.
  911. */
  912. *dma = virt_to_phys(virt);
  913. return virt;
  914. }
  915. alloc = alloc_iova(&tegra->carveout.domain,
  916. size >> tegra->carveout.shift,
  917. tegra->carveout.limit, true);
  918. if (!alloc) {
  919. err = -EBUSY;
  920. goto free_pages;
  921. }
  922. *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
  923. err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
  924. size, IOMMU_READ | IOMMU_WRITE);
  925. if (err < 0)
  926. goto free_iova;
  927. return virt;
  928. free_iova:
  929. __free_iova(&tegra->carveout.domain, alloc);
  930. free_pages:
  931. free_pages((unsigned long)virt, get_order(size));
  932. return ERR_PTR(err);
  933. }
  934. void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
  935. dma_addr_t dma)
  936. {
  937. if (tegra->domain)
  938. size = iova_align(&tegra->carveout.domain, size);
  939. else
  940. size = PAGE_ALIGN(size);
  941. if (tegra->domain) {
  942. iommu_unmap(tegra->domain, dma, size);
  943. free_iova(&tegra->carveout.domain,
  944. iova_pfn(&tegra->carveout.domain, dma));
  945. }
  946. free_pages((unsigned long)virt, get_order(size));
  947. }
  948. static int host1x_drm_probe(struct host1x_device *dev)
  949. {
  950. struct drm_driver *driver = &tegra_drm_driver;
  951. struct drm_device *drm;
  952. int err;
  953. drm = drm_dev_alloc(driver, &dev->dev);
  954. if (IS_ERR(drm))
  955. return PTR_ERR(drm);
  956. dev_set_drvdata(&dev->dev, drm);
  957. err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
  958. if (err < 0)
  959. goto put;
  960. err = drm_dev_register(drm, 0);
  961. if (err < 0)
  962. goto put;
  963. return 0;
  964. put:
  965. drm_dev_put(drm);
  966. return err;
  967. }
/* host1x bus remove: unregister the DRM device and drop our reference. */
static int host1x_drm_remove(struct host1x_device *dev)
{
	struct drm_device *drm = dev_get_drvdata(&dev->dev);

	drm_dev_unregister(drm);
	drm_dev_put(drm);

	return 0;
}
  975. #ifdef CONFIG_PM_SLEEP
  976. static int host1x_drm_suspend(struct device *dev)
  977. {
  978. struct drm_device *drm = dev_get_drvdata(dev);
  979. return drm_mode_config_helper_suspend(drm);
  980. }
  981. static int host1x_drm_resume(struct device *dev)
  982. {
  983. struct drm_device *drm = dev_get_drvdata(dev);
  984. return drm_mode_config_helper_resume(drm);
  985. }
  986. #endif
/* PM ops table; the callbacks are only compiled in under CONFIG_PM_SLEEP. */
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
			 host1x_drm_resume);
/*
 * Device-tree compatibles of all sub-devices that may be bound into the
 * composite Tegra DRM device (NULL-terminated by the sentinel entry).
 */
static const struct of_device_id host1x_drm_subdevs[] = {
	{ .compatible = "nvidia,tegra20-dc", },
	{ .compatible = "nvidia,tegra20-hdmi", },
	{ .compatible = "nvidia,tegra20-gr2d", },
	{ .compatible = "nvidia,tegra20-gr3d", },
	{ .compatible = "nvidia,tegra30-dc", },
	{ .compatible = "nvidia,tegra30-hdmi", },
	{ .compatible = "nvidia,tegra30-gr2d", },
	{ .compatible = "nvidia,tegra30-gr3d", },
	{ .compatible = "nvidia,tegra114-dsi", },
	{ .compatible = "nvidia,tegra114-hdmi", },
	{ .compatible = "nvidia,tegra114-gr3d", },
	{ .compatible = "nvidia,tegra124-dc", },
	{ .compatible = "nvidia,tegra124-sor", },
	{ .compatible = "nvidia,tegra124-hdmi", },
	{ .compatible = "nvidia,tegra124-dsi", },
	{ .compatible = "nvidia,tegra124-vic", },
	{ .compatible = "nvidia,tegra132-dsi", },
	{ .compatible = "nvidia,tegra210-dc", },
	{ .compatible = "nvidia,tegra210-dsi", },
	{ .compatible = "nvidia,tegra210-sor", },
	{ .compatible = "nvidia,tegra210-sor1", },
	{ .compatible = "nvidia,tegra210-vic", },
	{ .compatible = "nvidia,tegra186-display", },
	{ .compatible = "nvidia,tegra186-dc", },
	{ .compatible = "nvidia,tegra186-sor", },
	{ .compatible = "nvidia,tegra186-sor1", },
	{ .compatible = "nvidia,tegra186-vic", },
	{ .compatible = "nvidia,tegra194-display", },
	{ .compatible = "nvidia,tegra194-dc", },
	{ .compatible = "nvidia,tegra194-sor", },
	{ /* sentinel */ }
};
/* host1x bus driver binding the composite DRM device to its sub-devices. */
static struct host1x_driver host1x_drm_driver = {
	.driver = {
		.name = "drm",
		.pm = &host1x_drm_pm_ops,
	},
	.probe = host1x_drm_probe,
	.remove = host1x_drm_remove,
	.subdevs = host1x_drm_subdevs,
};
/* Platform drivers for the individual IP blocks, registered at module init. */
static struct platform_driver * const drivers[] = {
	&tegra_display_hub_driver,
	&tegra_dc_driver,
	&tegra_hdmi_driver,
	&tegra_dsi_driver,
	&tegra_dpaux_driver,
	&tegra_sor_driver,
	&tegra_gr2d_driver,
	&tegra_gr3d_driver,
	&tegra_vic_driver,
};
  1042. static int __init host1x_drm_init(void)
  1043. {
  1044. int err;
  1045. err = host1x_driver_register(&host1x_drm_driver);
  1046. if (err < 0)
  1047. return err;
  1048. err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
  1049. if (err < 0)
  1050. goto unregister_host1x;
  1051. return 0;
  1052. unregister_host1x:
  1053. host1x_driver_unregister(&host1x_drm_driver);
  1054. return err;
  1055. }
  1056. module_init(host1x_drm_init);
/* Module exit: tear down in reverse order of registration. */
static void __exit host1x_drm_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
	host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
  1063. MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
  1064. MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
  1065. MODULE_LICENSE("GPL v2");