drm.c
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0

#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
        struct idr contexts;
        struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
                              struct drm_atomic_state *state)
{
        int err;

        err = drm_atomic_helper_check(drm, state);
        if (err < 0)
                return err;

        return tegra_display_hub_atomic_check(drm, state);
}
static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
        .fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
        .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
        .atomic_check = tegra_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
        struct drm_device *drm = old_state->dev;
        struct tegra_drm *tegra = drm->dev_private;

        if (tegra->hub) {
                drm_atomic_helper_commit_modeset_disables(drm, old_state);
                tegra_display_hub_atomic_commit(drm, old_state);
                drm_atomic_helper_commit_planes(drm, old_state, 0);
                drm_atomic_helper_commit_modeset_enables(drm, old_state);
                drm_atomic_helper_commit_hw_done(old_state);
                drm_atomic_helper_wait_for_vblanks(drm, old_state);
                drm_atomic_helper_cleanup_planes(drm, old_state);
        } else {
                drm_atomic_helper_commit_tail_rpm(old_state);
        }
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
        .atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra;
        int err;

        tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
        if (!tegra)
                return -ENOMEM;

        if (iommu_present(&platform_bus_type)) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                struct iommu_domain_geometry *geometry;
                unsigned long order;

                tegra->domain = iommu_domain_alloc(&platform_bus_type);
                if (!tegra->domain) {
                        err = -ENOMEM;
                        goto free;
                }

                err = iova_cache_get();
                if (err < 0)
                        goto domain;

                geometry = &tegra->domain->geometry;
                gem_start = geometry->aperture_start;
                gem_end = geometry->aperture_end - CARVEOUT_SZ;
                carveout_start = gem_end + 1;
                carveout_end = geometry->aperture_end;

                order = __ffs(tegra->domain->pgsize_bitmap);
                init_iova_domain(&tegra->carveout.domain, 1UL << order,
                                 carveout_start >> order);

                tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
                tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

                drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
                mutex_init(&tegra->mm_lock);

                DRM_DEBUG("IOMMU apertures:\n");
                DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
                DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
                          carveout_end);
        }

        mutex_init(&tegra->clients_lock);
        INIT_LIST_HEAD(&tegra->clients);

        drm->dev_private = tegra;
        tegra->drm = drm;

        drm_mode_config_init(drm);

        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;

        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;

        drm->mode_config.allow_fb_modifiers = true;
        drm->mode_config.normalize_zpos = true;

        drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
        drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

        err = tegra_drm_fb_prepare(drm);
        if (err < 0)
                goto config;

        drm_kms_helper_poll_init(drm);

        err = host1x_device_init(device);
        if (err < 0)
                goto fbdev;

        if (tegra->hub) {
                err = tegra_display_hub_prepare(tegra->hub);
                if (err < 0)
                        goto device;
        }

        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow the
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;

        /* syncpoints are used for full 32-bit hardware VBLANK counters */
        drm->max_vblank_count = 0xffffffff;

        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (err < 0)
                goto hub;

        drm_mode_config_reset(drm);

        err = tegra_drm_fb_init(drm);
        if (err < 0)
                goto hub;

        return 0;

hub:
        if (tegra->hub)
                tegra_display_hub_cleanup(tegra->hub);
device:
        host1x_device_exit(device);
fbdev:
        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_free(drm);
config:
        drm_mode_config_cleanup(drm);

        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
        }
domain:
        if (tegra->domain)
                iommu_domain_free(tegra->domain);
free:
        kfree(tegra);
        return err;
}

static void tegra_drm_unload(struct drm_device *drm)
{
        struct host1x_device *device = to_host1x_device(drm->dev);
        struct tegra_drm *tegra = drm->dev_private;
        int err;

        drm_kms_helper_poll_fini(drm);
        tegra_drm_fb_exit(drm);
        drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);

        err = host1x_device_exit(device);
        if (err < 0)
                return;

        if (tegra->domain) {
                mutex_destroy(&tegra->mm_lock);
                drm_mm_takedown(&tegra->mm);
                put_iova_domain(&tegra->carveout.domain);
                iova_cache_put();
                iommu_domain_free(tegra->domain);
        }

        kfree(tegra);
}

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
        struct tegra_drm_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->contexts);
        mutex_init(&fpriv->lock);
        filp->driver_priv = fpriv;

        return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
        context->client->ops->close_channel(context);
        kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, handle);
        if (!gem)
                return NULL;

        bo = to_tegra_bo(gem);
        return &bo->base;
}

static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
                                       struct drm_tegra_reloc __user *src,
                                       struct drm_device *drm,
                                       struct drm_file *file)
{
        u32 cmdbuf, target;
        int err;

        err = get_user(cmdbuf, &src->cmdbuf.handle);
        if (err < 0)
                return err;

        err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
        if (err < 0)
                return err;

        err = get_user(target, &src->target.handle);
        if (err < 0)
                return err;

        err = get_user(dest->target.offset, &src->target.offset);
        if (err < 0)
                return err;

        err = get_user(dest->shift, &src->shift);
        if (err < 0)
                return err;

        dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
        if (!dest->cmdbuf.bo)
                return -ENOENT;

        dest->target.bo = host1x_bo_lookup(file, target);
        if (!dest->target.bo)
                return -ENOENT;

        return 0;
}
int tegra_drm_submit(struct tegra_drm_context *context,
                     struct drm_tegra_submit *args, struct drm_device *drm,
                     struct drm_file *file)
{
        struct host1x_client *client = &context->client->base;
        unsigned int num_cmdbufs = args->num_cmdbufs;
        unsigned int num_relocs = args->num_relocs;
        struct drm_tegra_cmdbuf __user *user_cmdbufs;
        struct drm_tegra_reloc __user *user_relocs;
        struct drm_tegra_syncpt __user *user_syncpt;
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
        struct host1x_syncpt *sp;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;

        user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
        user_relocs = u64_to_user_ptr(args->relocs);
        user_syncpt = u64_to_user_ptr(args->syncpts);
        /* We don't yet support more than one syncpt_incr struct per submit */
        if (args->num_syncpts != 1)
                return -EINVAL;

        /* We don't yet support waitchks */
        if (args->num_waitchks != 0)
                return -EINVAL;

        job = host1x_job_alloc(context->channel, args->num_cmdbufs,
                               args->num_relocs);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->client = client;
        job->class = client->class;
        job->serialize = true;

        /*
         * Track referenced BOs so that they can be unreferenced after the
         * submission is complete.
         */
        num_refs = num_cmdbufs + num_relocs * 2;
        refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
        if (!refs) {
                err = -ENOMEM;
                goto put;
        }

        /* reuse as an iterator later */
        num_refs = 0;

        while (num_cmdbufs) {
                struct drm_tegra_cmdbuf cmdbuf;
                struct host1x_bo *bo;
                struct tegra_bo *obj;
                u64 offset;

                if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }
                /*
                 * The maximum number of CDMA gather fetches is 16383; a
                 * higher value means the word count is malformed.
                 */
                if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
                        err = -EINVAL;
                        goto fail;
                }

                bo = host1x_bo_lookup(file, cmdbuf.handle);
                if (!bo) {
                        err = -ENOENT;
                        goto fail;
                }

                offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
                obj = host1x_to_tegra_bo(bo);
                refs[num_refs++] = &obj->gem;
                /*
                 * The gather buffer base address must be 4-byte aligned;
                 * an unaligned offset is malformed and causes command
                 * stream corruption when the buffer address is relocated.
                 */
                if (offset & 3 || offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);

                num_cmdbufs--;
                user_cmdbufs++;
        }

        /* copy and resolve relocations from submit */
        while (num_relocs--) {
                struct host1x_reloc *reloc;
                struct tegra_bo *obj;

                err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
                                                  &user_relocs[num_relocs], drm,
                                                  file);
                if (err < 0)
                        goto fail;

                reloc = &job->relocs[num_relocs];
                obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
                refs[num_refs++] = &obj->gem;
                /*
                 * An unaligned cmdbuf offset will cause an unaligned write
                 * during relocation patching, corrupting the command
                 * stream.
                 */
                if (reloc->cmdbuf.offset & 3 ||
                    reloc->cmdbuf.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }

                obj = host1x_to_tegra_bo(reloc->target.bo);
                refs[num_refs++] = &obj->gem;

                if (reloc->target.offset >= obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
        }

        if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
                err = -EFAULT;
                goto fail;
        }

        /* check whether syncpoint ID is valid */
        sp = host1x_syncpt_get(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
        }

        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
        job->syncpt_id = syncpt.id;
        job->timeout = 10000;

        if (args->timeout && args->timeout < 10000)
                job->timeout = args->timeout;

        err = host1x_job_pin(job, context->client->base.dev);
        if (err)
                goto fail;

        err = host1x_job_submit(job);
        if (err) {
                host1x_job_unpin(job);
                goto fail;
        }

        args->fence = job->syncpt_end;

fail:
        while (num_refs--)
                drm_gem_object_put_unlocked(refs[num_refs]);

        kfree(refs);

put:
        host1x_job_put(job);
        return err;
}
#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;

        bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);

        return 0;
}

static int tegra_gem_mmap(struct drm_device *drm, void *data,
                          struct drm_file *file)
{
        struct drm_tegra_gem_mmap *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -EINVAL;

        bo = to_tegra_bo(gem);

        args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

        drm_gem_object_put_unlocked(gem);

        return 0;
}
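
/*
 * Illustrative sketch (not part of the original source): how the two GEM
 * ioctls above are typically driven from userspace, assuming the staging
 * UAPI from the tegra_drm.h UAPI header and an already-open DRM fd. The
 * function name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/tegra_drm.h>

/* Allocate a buffer, query its fake mmap offset and map it. */
static void *example_gem_create_and_map(int fd, uint64_t size,
                                        uint32_t *handle)
{
        struct drm_tegra_gem_create create;
        struct drm_tegra_gem_mmap map;

        memset(&create, 0, sizeof(create));
        create.size = size;

        if (ioctl(fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create) < 0)
                return NULL;

        memset(&map, 0, sizeof(map));
        map.handle = create.handle;

        /* tegra_gem_mmap() above fills in the drm_vma node offset */
        if (ioctl(fd, DRM_IOCTL_TEGRA_GEM_MMAP, &map) < 0)
                return NULL;

        *handle = create.handle;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                    map.offset);
}
#endif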
static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;

        args->value = host1x_syncpt_read_min(sp);
        return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
{
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;

        sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;

        return host1x_syncpt_wait(sp, args->thresh,
                                  msecs_to_jiffies(args->timeout),
                                  &args->value);
}
static int tegra_client_open(struct tegra_drm_file *fpriv,
                             struct tegra_drm_client *client,
                             struct tegra_drm_context *context)
{
        int err;

        err = client->ops->open_channel(client, context);
        if (err < 0)
                return err;

        err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
        if (err < 0) {
                client->ops->close_channel(context);
                return err;
        }

        context->client = client;
        context->id = err;

        return 0;
}

static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
        struct tegra_drm_context *context;
        struct tegra_drm_client *client;
        int err = -ENODEV;

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        mutex_lock(&fpriv->lock);

        list_for_each_entry(client, &tegra->clients, list)
                if (client->base.class == args->client) {
                        err = tegra_client_open(fpriv, client, context);
                        if (err < 0)
                                break;

                        args->context = context->id;
                        break;
                }

        if (err < 0)
                kfree(context);

        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
        struct tegra_drm_context *context;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -EINVAL;
                goto unlock;
        }

        idr_remove(&fpriv->contexts, context->id);
        tegra_drm_context_free(context);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}
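
/*
 * Illustrative sketch (not part of the original source): opening and
 * closing a channel from userspace. The client is selected by host1x
 * class, as matched in tegra_open_channel() above; 0x51 (the Tegra20
 * gr2d class) is used purely as an example value, and the function names
 * are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int example_open_channel(int fd, uint32_t client, uint64_t *context)
{
        struct drm_tegra_open_channel args;

        memset(&args, 0, sizeof(args));
        args.client = client;          /* e.g. 0x51 for gr2d on Tegra20 */

        if (ioctl(fd, DRM_IOCTL_TEGRA_OPEN_CHANNEL, &args) < 0)
                return -1;

        *context = args.context;
        return 0;
}

static int example_close_channel(int fd, uint64_t context)
{
        struct drm_tegra_close_channel args;

        memset(&args, 0, sizeof(args));
        args.context = context;

        return ioctl(fd, DRM_IOCTL_TEGRA_CLOSE_CHANNEL, &args);
}
#endif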
static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->index >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
        struct tegra_drm_context *context;
        int err;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        err = context->client->ops->submit(context, args, drm, file);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}
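
/*
 * Illustrative sketch (not part of the original source): a minimal
 * userspace submission on a previously opened context, followed by a
 * wait on the returned syncpoint fence. It follows the constraints the
 * handlers above enforce: exactly one syncpoint descriptor, no waitchks,
 * a 4-byte-aligned cmdbuf offset and at most 16383 words. Assumes the
 * staging UAPI from the tegra_drm.h UAPI header; the function name is
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int example_submit(int fd, uint64_t context, uint32_t bo_handle,
                          uint32_t words)
{
        struct drm_tegra_syncpt_wait wait;
        struct drm_tegra_get_syncpt get;
        struct drm_tegra_syncpt syncpt;
        struct drm_tegra_cmdbuf cmdbuf;
        struct drm_tegra_submit submit;
        int err;

        /* look up the engine's first syncpoint */
        memset(&get, 0, sizeof(get));
        get.context = context;
        get.index = 0;

        err = ioctl(fd, DRM_IOCTL_TEGRA_GET_SYNCPT, &get);
        if (err < 0)
                return err;

        memset(&cmdbuf, 0, sizeof(cmdbuf));
        cmdbuf.handle = bo_handle;
        cmdbuf.offset = 0;              /* must be 4-byte aligned */
        cmdbuf.words = words;           /* at most 16383 */

        memset(&syncpt, 0, sizeof(syncpt));
        syncpt.id = get.id;
        syncpt.incrs = 1;               /* the opcodes increment it once */

        memset(&submit, 0, sizeof(submit));
        submit.context = context;
        submit.num_syncpts = 1;
        submit.num_cmdbufs = 1;
        submit.syncpts = (uintptr_t)&syncpt;
        submit.cmdbufs = (uintptr_t)&cmdbuf;

        err = ioctl(fd, DRM_IOCTL_TEGRA_SUBMIT, &submit);
        if (err < 0)
                return err;

        /* block until the job's fence value is reached */
        memset(&wait, 0, sizeof(wait));
        wait.id = get.id;
        wait.thresh = submit.fence;
        wait.timeout = 1000;            /* milliseconds */

        return ioctl(fd, DRM_IOCTL_TEGRA_SYNCPT_WAIT, &wait);
}
#endif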
static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
                                 struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt_base *args = data;
        struct tegra_drm_context *context;
        struct host1x_syncpt_base *base;
        struct host1x_syncpt *syncpt;
        int err = 0;

        mutex_lock(&fpriv->lock);

        context = idr_find(&fpriv->contexts, args->context);
        if (!context) {
                err = -ENODEV;
                goto unlock;
        }

        if (args->syncpt >= context->client->base.num_syncpts) {
                err = -EINVAL;
                goto unlock;
        }

        syncpt = context->client->base.syncpts[args->syncpt];

        base = host1x_syncpt_get_base(syncpt);
        if (!base) {
                err = -ENXIO;
                goto unlock;
        }

        args->id = host1x_syncpt_base_id(base);

unlock:
        mutex_unlock(&fpriv->lock);
        return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_set_tiling *args = data;
        enum tegra_bo_tiling_mode mode;
        struct drm_gem_object *gem;
        unsigned long value = 0;
        struct tegra_bo *bo;

        switch (args->mode) {
        case DRM_TEGRA_GEM_TILING_MODE_PITCH:
                mode = TEGRA_BO_TILING_MODE_PITCH;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_TILED:
                mode = TEGRA_BO_TILING_MODE_TILED;

                if (args->value != 0)
                        return -EINVAL;

                break;

        case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
                mode = TEGRA_BO_TILING_MODE_BLOCK;

                if (args->value > 5)
                        return -EINVAL;

                value = args->value;
                break;

        default:
                return -EINVAL;
        }

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        bo->tiling.mode = mode;
        bo->tiling.value = value;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
                                struct drm_file *file)
{
        struct drm_tegra_gem_get_tiling *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;
        int err = 0;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);

        switch (bo->tiling.mode) {
        case TEGRA_BO_TILING_MODE_PITCH:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_TILED:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
                args->value = 0;
                break;

        case TEGRA_BO_TILING_MODE_BLOCK:
                args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
                args->value = bo->tiling.value;
                break;

        default:
                err = -EINVAL;
                break;
        }

        drm_gem_object_put_unlocked(gem);

        return err;
}
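
/*
 * Illustrative sketch (not part of the original source): tagging a buffer
 * as block-linear from userspace. "value" is the block height exponent
 * (0-5; tegra_gem_set_tiling() above rejects anything larger). The
 * function name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

static int example_set_block_tiling(int fd, uint32_t handle, uint32_t value)
{
        struct drm_tegra_gem_set_tiling args;

        memset(&args, 0, sizeof(args));
        args.handle = handle;
        args.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
        args.value = value;

        return ioctl(fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
}
#endif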
static int tegra_gem_set_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_set_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
                return -EINVAL;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        bo->flags = 0;

        if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
                               struct drm_file *file)
{
        struct drm_tegra_gem_get_flags *args = data;
        struct drm_gem_object *gem;
        struct tegra_bo *bo;

        gem = drm_gem_object_lookup(file, args->handle);
        if (!gem)
                return -ENOENT;

        bo = to_tegra_bo(gem);
        args->flags = 0;

        if (bo->flags & TEGRA_BO_BOTTOM_UP)
                args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

        drm_gem_object_put_unlocked(gem);

        return 0;
}
#endif

static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
                          DRM_UNLOCKED | DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap = tegra_drm_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = drm_compat_ioctl,
        .llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
        struct tegra_drm_context *context = p;

        tegra_drm_context_free(context);

        return 0;
}

static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
        struct tegra_drm_file *fpriv = file->driver_priv;

        mutex_lock(&fpriv->lock);
        idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
        mutex_unlock(&fpriv->lock);

        idr_destroy(&fpriv->contexts);
        mutex_destroy(&fpriv->lock);
        kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct drm_framebuffer *fb;

        mutex_lock(&drm->mode_config.fb_lock);

        list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
                seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
                           fb->base.id, fb->width, fb->height,
                           fb->format->depth,
                           fb->format->cpp[0] * 8,
                           drm_framebuffer_read_refcount(fb));
        }

        mutex_unlock(&drm->mode_config.fb_lock);

        return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)s->private;
        struct drm_device *drm = node->minor->dev;
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_printer p = drm_seq_file_printer(s);

        if (tegra->domain) {
                mutex_lock(&tegra->mm_lock);
                drm_mm_print(&tegra->mm, &p);
                mutex_unlock(&tegra->mm_lock);
        }

        return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
        { "framebuffers", tegra_debugfs_framebuffers, 0 },
        { "iova", tegra_debugfs_iova, 0 },
};

static int tegra_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(tegra_debugfs_list,
                                        ARRAY_SIZE(tegra_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif
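
/*
 * Note (not part of the original source): with CONFIG_DEBUG_FS enabled,
 * the two entries registered above appear under the DRM minor's debugfs
 * directory, typically as /sys/kernel/debug/dri/0/framebuffers and
 * /sys/kernel/debug/dri/0/iova (the minor number may differ).
 */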
static struct drm_driver tegra_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC | DRIVER_RENDER,
        .load = tegra_drm_load,
        .unload = tegra_drm_unload,
        .open = tegra_drm_open,
        .postclose = tegra_drm_postclose,
        .lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
        .debugfs_init = tegra_debugfs_init,
#endif

        .gem_free_object_unlocked = tegra_bo_free_object,
        .gem_vm_ops = &tegra_bo_vm_ops,

        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = tegra_gem_prime_export,
        .gem_prime_import = tegra_gem_prime_import,

        .dumb_create = tegra_bo_dumb_create,

        .ioctls = tegra_drm_ioctls,
        .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
        .fops = &tegra_drm_fops,

        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
        .date = DRIVER_DATE,
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
                              struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_add_tail(&client->list, &tegra->clients);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
                                struct tegra_drm_client *client)
{
        mutex_lock(&tegra->clients_lock);
        list_del_init(&client->list);
        mutex_unlock(&tegra->clients_lock);

        return 0;
}

struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
                                               bool shared)
{
        struct drm_device *drm = dev_get_drvdata(client->parent);
        struct tegra_drm *tegra = drm->dev_private;
        struct iommu_group *group = NULL;
        int err;

        if (tegra->domain) {
                group = iommu_group_get(client->dev);
                if (!group) {
                        dev_err(client->dev, "failed to get IOMMU group\n");
                        return ERR_PTR(-ENODEV);
                }

                if (!shared || (shared && (group != tegra->group))) {
                        err = iommu_attach_group(tegra->domain, group);
                        if (err < 0) {
                                iommu_group_put(group);
                                return ERR_PTR(err);
                        }

                        if (shared && !tegra->group)
                                tegra->group = group;
                }
        }

        return group;
}

void host1x_client_iommu_detach(struct host1x_client *client,
                                struct iommu_group *group)
{
        struct drm_device *drm = dev_get_drvdata(client->parent);
        struct tegra_drm *tegra = drm->dev_private;

        if (group) {
                if (group == tegra->group) {
                        iommu_detach_group(tegra->domain, group);
                        tegra->group = NULL;
                }

                iommu_group_put(group);
        }
}
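
/*
 * Illustrative sketch (not part of the original source): how an engine
 * driver might pair the two helpers above in its host1x client init/exit
 * callbacks. The surrounding driver structure and function names are
 * assumed.
 */
#if 0
static int example_client_init(struct host1x_client *client)
{
        struct iommu_group *group;

        /* attach the engine to the DRM IOMMU domain, if one exists */
        group = host1x_client_iommu_attach(client, false);
        if (IS_ERR(group))
                return PTR_ERR(group);

        /* ... engine-specific setup; keep "group" around for exit ... */

        return 0;
}

static int example_client_exit(struct host1x_client *client,
                               struct iommu_group *group)
{
        host1x_client_iommu_detach(client, group);

        return 0;
}
#endif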
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
        struct iova *alloc;
        void *virt;
        gfp_t gfp;
        int err;

        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        gfp = GFP_KERNEL | __GFP_ZERO;
        if (!tegra->domain) {
                /*
                 * Many units only support 32-bit addresses, even on 64-bit
                 * SoCs. If there is no IOMMU to translate into a 32-bit IO
                 * virtual address space, force allocations to be in the
                 * lower 32-bit range.
                 */
                gfp |= GFP_DMA;
        }

        virt = (void *)__get_free_pages(gfp, get_order(size));
        if (!virt)
                return ERR_PTR(-ENOMEM);

        if (!tegra->domain) {
                /*
                 * If IOMMU is disabled, devices address physical memory
                 * directly.
                 */
                *dma = virt_to_phys(virt);
                return virt;
        }

        alloc = alloc_iova(&tegra->carveout.domain,
                           size >> tegra->carveout.shift,
                           tegra->carveout.limit, true);
        if (!alloc) {
                err = -EBUSY;
                goto free_pages;
        }

        *dma = iova_dma_addr(&tegra->carveout.domain, alloc);
        err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
                        size, IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto free_iova;

        return virt;

free_iova:
        __free_iova(&tegra->carveout.domain, alloc);
free_pages:
        free_pages((unsigned long)virt, get_order(size));

        return ERR_PTR(err);
}

void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
                    dma_addr_t dma)
{
        if (tegra->domain)
                size = iova_align(&tegra->carveout.domain, size);
        else
                size = PAGE_ALIGN(size);

        if (tegra->domain) {
                iommu_unmap(tegra->domain, dma, size);
                free_iova(&tegra->carveout.domain,
                          iova_pfn(&tegra->carveout.domain, dma));
        }

        free_pages((unsigned long)virt, get_order(size));
}
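
/*
 * Illustrative sketch (not part of the original source): typical pairing
 * of tegra_drm_alloc()/tegra_drm_free() above, e.g. for a firmware or
 * scratch buffer that an engine reaches through the carveout. The
 * function names are hypothetical.
 */
#if 0
static int example_alloc_scratch(struct tegra_drm *tegra, size_t size,
                                 void **virt, dma_addr_t *iova)
{
        *virt = tegra_drm_alloc(tegra, size, iova);
        if (IS_ERR(*virt))
                return PTR_ERR(*virt);

        /* ... program *iova into the engine ... */

        return 0;
}

static void example_free_scratch(struct tegra_drm *tegra, size_t size,
                                 void *virt, dma_addr_t iova)
{
        tegra_drm_free(tegra, size, virt, iova);
}
#endif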
static int host1x_drm_probe(struct host1x_device *dev)
{
        struct drm_driver *driver = &tegra_drm_driver;
        struct drm_device *drm;
        int err;

        drm = drm_dev_alloc(driver, &dev->dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        dev_set_drvdata(&dev->dev, drm);

        err = drm_dev_register(drm, 0);
        if (err < 0)
                goto unref;

        return 0;

unref:
        drm_dev_unref(drm);
        return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
        struct drm_device *drm = dev_get_drvdata(&dev->dev);

        drm_dev_unregister(drm);
        drm_dev_unref(drm);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
                         host1x_drm_resume);

static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra20-dc", },
        { .compatible = "nvidia,tegra20-hdmi", },
        { .compatible = "nvidia,tegra20-gr2d", },
        { .compatible = "nvidia,tegra20-gr3d", },
        { .compatible = "nvidia,tegra30-dc", },
        { .compatible = "nvidia,tegra30-hdmi", },
        { .compatible = "nvidia,tegra30-gr2d", },
        { .compatible = "nvidia,tegra30-gr3d", },
        { .compatible = "nvidia,tegra114-dsi", },
        { .compatible = "nvidia,tegra114-hdmi", },
        { .compatible = "nvidia,tegra114-gr3d", },
        { .compatible = "nvidia,tegra124-dc", },
        { .compatible = "nvidia,tegra124-sor", },
        { .compatible = "nvidia,tegra124-hdmi", },
        { .compatible = "nvidia,tegra124-dsi", },
        { .compatible = "nvidia,tegra124-vic", },
        { .compatible = "nvidia,tegra132-dsi", },
        { .compatible = "nvidia,tegra210-dc", },
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra210-sor", },
        { .compatible = "nvidia,tegra210-sor1", },
        { .compatible = "nvidia,tegra210-vic", },
        { .compatible = "nvidia,tegra186-display", },
        { .compatible = "nvidia,tegra186-dc", },
        { .compatible = "nvidia,tegra186-sor", },
        { .compatible = "nvidia,tegra186-sor1", },
        { .compatible = "nvidia,tegra186-vic", },
        { /* sentinel */ }
};

static struct host1x_driver host1x_drm_driver = {
        .driver = {
                .name = "drm",
                .pm = &host1x_drm_pm_ops,
        },
        .probe = host1x_drm_probe,
        .remove = host1x_drm_remove,
        .subdevs = host1x_drm_subdevs,
};

static struct platform_driver * const drivers[] = {
        &tegra_display_hub_driver,
        &tegra_dc_driver,
        &tegra_hdmi_driver,
        &tegra_dsi_driver,
        &tegra_dpaux_driver,
        &tegra_sor_driver,
        &tegra_gr2d_driver,
        &tegra_gr3d_driver,
        &tegra_vic_driver,
};

static int __init host1x_drm_init(void)
{
        int err;

        err = host1x_driver_register(&host1x_drm_driver);
        if (err < 0)
                return err;

        err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
        if (err < 0)
                goto unregister_host1x;

        return 0;

unregister_host1x:
        host1x_driver_unregister(&host1x_drm_driver);
        return err;
}
module_init(host1x_drm_init);

static void __exit host1x_drm_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
        host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");