exynos_drm_ipp.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912
  1. /*
  2. * Copyright (C) 2012 Samsung Electronics Co.Ltd
  3. * Authors:
  4. * Eunchul Kim <chulspro.kim@samsung.com>
  5. * Jinyoung Jeon <jy0.jeon@samsung.com>
  6. * Sangmin Lee <lsmin.lee@samsung.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License as published by the
  10. * Free Software Foundation; either version 2 of the License, or (at your
  11. * option) any later version.
  12. *
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/types.h>
  17. #include <linux/clk.h>
  18. #include <linux/pm_runtime.h>
  19. #include <drm/drmP.h>
  20. #include <drm/exynos_drm.h>
  21. #include "exynos_drm_drv.h"
  22. #include "exynos_drm_gem.h"
  23. #include "exynos_drm_ipp.h"
  24. #include "exynos_drm_iommu.h"
/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator, and similar
 * blocks. IPP is an integrating device driver for hardware sharing these
 * attributes.
 */
/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event ids if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open handling.
 * 7. implement power and sysmmu control in power_on.
 */
  41. #define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
  42. #define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
  43. /* platform device pointer for ipp device. */
  44. static struct platform_device *exynos_drm_ipp_pdev;
/*
 * A structure describing a completion event delivered to userspace.
 *
 * @base: DRM pending-event bookkeeping used by the drm core to queue
 *	and deliver the event on the file's event list.
 * @event: ipp event payload (type, prop_id, buf_id) read by the client.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
/*
 * A structure of memory node: one queued buffer on a command node's
 * source or destination memory list.
 *
 * @list: list head linking the node into c_node->mem_list[ops_id].
 * @ops_id: id of operations (source or destination side).
 * @prop_id: id of the property this buffer was queued against.
 * @buf_id: id of buffer as supplied by userspace.
 * @buf_info: gem handles and dma addresses, one per plane.
 * @filp: drm_file that queued the buffer; used to drop the gem
 *	references again on dequeue.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head list;
	enum drm_exynos_ops_id ops_id;
	u32 prop_id;
	u32 buf_id;
	struct drm_exynos_ipp_buf_info buf_info;
	struct drm_file *filp;
};
/*
 * A structure of ipp context: per-device private data of the ipp subdrv.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: idr mapping ipp_id -> exynos_drm_ippdrv.
 * @prop_idr: idr mapping prop_id -> drm_exynos_ipp_cmd_node.
 * @event_workq: workqueue on which completion events are dispatched.
 * @cmd_workq: workqueue on which start/stop command work runs.
 */
struct ipp_context {
	struct exynos_drm_subdrv subdrv;
	struct mutex ipp_lock;
	struct mutex prop_lock;
	struct idr ipp_idr;
	struct idr prop_idr;
	struct workqueue_struct *event_workq;
	struct workqueue_struct *cmd_workq;
};
  93. static LIST_HEAD(exynos_drm_ippdrv_list);
  94. static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
  95. static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
  96. int exynos_platform_device_ipp_register(void)
  97. {
  98. struct platform_device *pdev;
  99. if (exynos_drm_ipp_pdev)
  100. return -EEXIST;
  101. pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
  102. if (IS_ERR(pdev))
  103. return PTR_ERR(pdev);
  104. exynos_drm_ipp_pdev = pdev;
  105. return 0;
  106. }
  107. void exynos_platform_device_ipp_unregister(void)
  108. {
  109. if (exynos_drm_ipp_pdev) {
  110. platform_device_unregister(exynos_drm_ipp_pdev);
  111. exynos_drm_ipp_pdev = NULL;
  112. }
  113. }
/*
 * Add @ippdrv to the global list of registered ipp drivers.
 * Always returns 0.
 */
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
/*
 * Remove @ippdrv from the global list of registered ipp drivers.
 * Always returns 0.
 */
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
  128. static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
  129. u32 *idp)
  130. {
  131. int ret;
  132. /* do the allocation under our mutexlock */
  133. mutex_lock(lock);
  134. ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
  135. mutex_unlock(lock);
  136. if (ret < 0)
  137. return ret;
  138. *idp = ret;
  139. return 0;
  140. }
/* Release idr slot @id under @lock, making the id available again. */
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}
  147. static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
  148. {
  149. void *obj;
  150. mutex_lock(lock);
  151. obj = idr_find(id_idr, id);
  152. mutex_unlock(lock);
  153. return obj;
  154. }
  155. static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
  156. enum drm_exynos_ipp_cmd cmd)
  157. {
  158. /*
  159. * check dedicated flag and WB, OUTPUT operation with
  160. * power on state.
  161. */
  162. if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
  163. !pm_runtime_suspended(ippdrv->dev)))
  164. return true;
  165. return false;
  166. }
/*
 * Pick an ipp driver able to run @property.
 *
 * If userspace supplied a non-zero ipp_id, that exact driver is looked
 * up in the idr and validated; with ipp_id == 0 every registered driver
 * is scanned and the first one that is both free and accepts the
 * property wins.
 *
 * Returns the driver on success or an ERR_PTR:
 *	-ENODEV	no driver with that id / none suitable,
 *	-EBUSY	the requested driver is already dedicated,
 *	-EINVAL	the requested driver rejects the property.
 */
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		/*
		 * WB, OUTPUT operations do not support multi-operation,
		 * so the driver is made dedicated at set-property time.
		 * When the ipp driver finishes, the dedicated flag is
		 * cleared again.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already used choose device.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * ipp drivers have different abilities, so the chosen
		 * driver must confirm it can handle this property.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("not support property.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * No ipp_id given: search the whole driver list for the
		 * first free driver that supports the property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("used device.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("not support property.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("not support ipp driver operations.\n");
	}

	return ERR_PTR(-ENODEV);
}
  223. static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
  224. {
  225. struct exynos_drm_ippdrv *ippdrv;
  226. struct drm_exynos_ipp_cmd_node *c_node;
  227. int count = 0;
  228. DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
  229. /*
  230. * This case is search ipp driver by prop_id handle.
  231. * sometimes, ipp subsystem find driver by prop_id.
  232. * e.g PAUSE state, queue buf, command control.
  233. */
  234. list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
  235. DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
  236. mutex_lock(&ippdrv->cmd_lock);
  237. list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
  238. if (c_node->property.prop_id == prop_id) {
  239. mutex_unlock(&ippdrv->cmd_lock);
  240. return ippdrv;
  241. }
  242. }
  243. mutex_unlock(&ippdrv->cmd_lock);
  244. }
  245. return ERR_PTR(-ENODEV);
  246. }
/*
 * DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY handler.
 *
 * Two-step capability discovery: with ipp_id == 0 the number of
 * registered ipp drivers is reported in prop_list->count; with a valid
 * ipp_id the matching driver's capability list is copied out.
 *
 * Returns 0 on success, -EINVAL for missing context/parameters, or
 * -ENODEV for an unknown ipp_id.
 */
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * First step: the application fetches the driver count;
		 * second step: it queries each capability by ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Per-id capability query.  Some devices do not support
		 * the wb/output interface, so userspace uses this ioctl
		 * to pick the right driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
  292. static void ipp_print_property(struct drm_exynos_ipp_property *property,
  293. int idx)
  294. {
  295. struct drm_exynos_ipp_config *config = &property->config[idx];
  296. struct drm_exynos_pos *pos = &config->pos;
  297. struct drm_exynos_sz *sz = &config->sz;
  298. DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
  299. property->prop_id, idx ? "dst" : "src", config->fmt);
  300. DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
  301. pos->x, pos->y, pos->w, pos->h,
  302. sz->hsize, sz->vsize, config->flip, config->degree);
  303. }
  304. static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
  305. {
  306. struct exynos_drm_ippdrv *ippdrv;
  307. struct drm_exynos_ipp_cmd_node *c_node;
  308. u32 prop_id = property->prop_id;
  309. DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
  310. ippdrv = ipp_find_drv_by_handle(prop_id);
  311. if (IS_ERR(ippdrv)) {
  312. DRM_ERROR("failed to get ipp driver.\n");
  313. return -EINVAL;
  314. }
  315. /*
  316. * Find command node using command list in ippdrv.
  317. * when we find this command no using prop_id.
  318. * return property information set in this command node.
  319. */
  320. mutex_lock(&ippdrv->cmd_lock);
  321. list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
  322. if ((c_node->property.prop_id == prop_id) &&
  323. (c_node->state == IPP_STATE_STOP)) {
  324. mutex_unlock(&ippdrv->cmd_lock);
  325. DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
  326. property->cmd, (int)ippdrv);
  327. c_node->property = *property;
  328. return 0;
  329. }
  330. }
  331. mutex_unlock(&ippdrv->cmd_lock);
  332. DRM_ERROR("failed to search property.\n");
  333. return -EINVAL;
  334. }
  335. static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
  336. {
  337. struct drm_exynos_ipp_cmd_work *cmd_work;
  338. cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
  339. if (!cmd_work)
  340. return ERR_PTR(-ENOMEM);
  341. INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
  342. return cmd_work;
  343. }
  344. static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
  345. {
  346. struct drm_exynos_ipp_event_work *event_work;
  347. event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
  348. if (!event_work)
  349. return ERR_PTR(-ENOMEM);
  350. INIT_WORK(&event_work->work, ipp_sched_event);
  351. return event_work;
  352. }
  353. int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
  354. struct drm_file *file)
  355. {
  356. struct drm_exynos_file_private *file_priv = file->driver_priv;
  357. struct device *dev = file_priv->ipp_dev;
  358. struct ipp_context *ctx = get_ipp_context(dev);
  359. struct drm_exynos_ipp_property *property = data;
  360. struct exynos_drm_ippdrv *ippdrv;
  361. struct drm_exynos_ipp_cmd_node *c_node;
  362. int ret, i;
  363. if (!ctx) {
  364. DRM_ERROR("invalid context.\n");
  365. return -EINVAL;
  366. }
  367. if (!property) {
  368. DRM_ERROR("invalid property parameter.\n");
  369. return -EINVAL;
  370. }
  371. /*
  372. * This is log print for user application property.
  373. * user application set various property.
  374. */
  375. for_each_ipp_ops(i)
  376. ipp_print_property(property, i);
  377. /*
  378. * set property ioctl generated new prop_id.
  379. * but in this case already asigned prop_id using old set property.
  380. * e.g PAUSE state. this case supports find current prop_id and use it
  381. * instead of allocation.
  382. */
  383. if (property->prop_id) {
  384. DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
  385. return ipp_find_and_set_property(property);
  386. }
  387. /* find ipp driver using ipp id */
  388. ippdrv = ipp_find_driver(ctx, property);
  389. if (IS_ERR(ippdrv)) {
  390. DRM_ERROR("failed to get ipp driver.\n");
  391. return -EINVAL;
  392. }
  393. /* allocate command node */
  394. c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
  395. if (!c_node)
  396. return -ENOMEM;
  397. /* create property id */
  398. ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
  399. &property->prop_id);
  400. if (ret) {
  401. DRM_ERROR("failed to create id.\n");
  402. goto err_clear;
  403. }
  404. DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
  405. property->prop_id, property->cmd, (int)ippdrv);
  406. /* stored property information and ippdrv in private data */
  407. c_node->dev = dev;
  408. c_node->property = *property;
  409. c_node->state = IPP_STATE_IDLE;
  410. c_node->start_work = ipp_create_cmd_work();
  411. if (IS_ERR(c_node->start_work)) {
  412. DRM_ERROR("failed to create start work.\n");
  413. goto err_remove_id;
  414. }
  415. c_node->stop_work = ipp_create_cmd_work();
  416. if (IS_ERR(c_node->stop_work)) {
  417. DRM_ERROR("failed to create stop work.\n");
  418. goto err_free_start;
  419. }
  420. c_node->event_work = ipp_create_event_work();
  421. if (IS_ERR(c_node->event_work)) {
  422. DRM_ERROR("failed to create event work.\n");
  423. goto err_free_stop;
  424. }
  425. mutex_init(&c_node->lock);
  426. mutex_init(&c_node->mem_lock);
  427. mutex_init(&c_node->event_lock);
  428. init_completion(&c_node->start_complete);
  429. init_completion(&c_node->stop_complete);
  430. for_each_ipp_ops(i)
  431. INIT_LIST_HEAD(&c_node->mem_list[i]);
  432. INIT_LIST_HEAD(&c_node->event_list);
  433. mutex_lock(&ippdrv->cmd_lock);
  434. list_add_tail(&c_node->list, &ippdrv->cmd_list);
  435. mutex_unlock(&ippdrv->cmd_lock);
  436. /* make dedicated state without m2m */
  437. if (!ipp_is_m2m_cmd(property->cmd))
  438. ippdrv->dedicated = true;
  439. return 0;
  440. err_free_stop:
  441. kfree(c_node->stop_work);
  442. err_free_start:
  443. kfree(c_node->start_work);
  444. err_remove_id:
  445. ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
  446. err_clear:
  447. kfree(c_node);
  448. return ret;
  449. }
  450. static void ipp_clean_cmd_node(struct ipp_context *ctx,
  451. struct drm_exynos_ipp_cmd_node *c_node)
  452. {
  453. /* delete list */
  454. list_del(&c_node->list);
  455. ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
  456. c_node->property.prop_id);
  457. /* destroy mutex */
  458. mutex_destroy(&c_node->lock);
  459. mutex_destroy(&c_node->mem_lock);
  460. mutex_destroy(&c_node->event_lock);
  461. /* free command node */
  462. kfree(c_node->start_work);
  463. kfree(c_node->stop_work);
  464. kfree(c_node->event_work);
  465. kfree(c_node);
  466. }
  467. static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
  468. {
  469. switch (c_node->property.cmd) {
  470. case IPP_CMD_WB:
  471. return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
  472. case IPP_CMD_OUTPUT:
  473. return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
  474. case IPP_CMD_M2M:
  475. default:
  476. return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
  477. !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
  478. }
  479. }
  480. static struct drm_exynos_ipp_mem_node
  481. *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
  482. struct drm_exynos_ipp_queue_buf *qbuf)
  483. {
  484. struct drm_exynos_ipp_mem_node *m_node;
  485. struct list_head *head;
  486. int count = 0;
  487. DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
  488. /* source/destination memory list */
  489. head = &c_node->mem_list[qbuf->ops_id];
  490. /* find memory node from memory list */
  491. list_for_each_entry(m_node, head, list) {
  492. DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
  493. /* compare buffer id */
  494. if (m_node->buf_id == qbuf->buf_id)
  495. return m_node;
  496. }
  497. return NULL;
  498. }
  499. static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
  500. struct drm_exynos_ipp_cmd_node *c_node,
  501. struct drm_exynos_ipp_mem_node *m_node)
  502. {
  503. struct exynos_drm_ipp_ops *ops = NULL;
  504. int ret = 0;
  505. DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
  506. if (!m_node) {
  507. DRM_ERROR("invalid queue node.\n");
  508. return -EFAULT;
  509. }
  510. DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
  511. /* get operations callback */
  512. ops = ippdrv->ops[m_node->ops_id];
  513. if (!ops) {
  514. DRM_ERROR("not support ops.\n");
  515. return -EFAULT;
  516. }
  517. /* set address and enable irq */
  518. if (ops->set_addr) {
  519. ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
  520. m_node->buf_id, IPP_BUF_ENQUEUE);
  521. if (ret) {
  522. DRM_ERROR("failed to set addr.\n");
  523. return ret;
  524. }
  525. }
  526. return ret;
  527. }
  528. static struct drm_exynos_ipp_mem_node
  529. *ipp_get_mem_node(struct drm_device *drm_dev,
  530. struct drm_file *file,
  531. struct drm_exynos_ipp_cmd_node *c_node,
  532. struct drm_exynos_ipp_queue_buf *qbuf)
  533. {
  534. struct drm_exynos_ipp_mem_node *m_node;
  535. struct drm_exynos_ipp_buf_info *buf_info;
  536. int i;
  537. m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
  538. if (!m_node)
  539. return ERR_PTR(-ENOMEM);
  540. buf_info = &m_node->buf_info;
  541. /* operations, buffer id */
  542. m_node->ops_id = qbuf->ops_id;
  543. m_node->prop_id = qbuf->prop_id;
  544. m_node->buf_id = qbuf->buf_id;
  545. DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
  546. DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
  547. for_each_ipp_planar(i) {
  548. DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
  549. /* get dma address by handle */
  550. if (qbuf->handle[i]) {
  551. dma_addr_t *addr;
  552. addr = exynos_drm_gem_get_dma_addr(drm_dev,
  553. qbuf->handle[i], file);
  554. if (IS_ERR(addr)) {
  555. DRM_ERROR("failed to get addr.\n");
  556. goto err_clear;
  557. }
  558. buf_info->handles[i] = qbuf->handle[i];
  559. buf_info->base[i] = *addr;
  560. DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
  561. buf_info->base[i], buf_info->handles[i]);
  562. }
  563. }
  564. m_node->filp = file;
  565. mutex_lock(&c_node->mem_lock);
  566. list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
  567. mutex_unlock(&c_node->mem_lock);
  568. return m_node;
  569. err_clear:
  570. kfree(m_node);
  571. return ERR_PTR(-EFAULT);
  572. }
/*
 * Release one memory node: drop the dma-address reference on every gem
 * handle it holds, unlink it from its queue, and free it.
 *
 * NOTE(review): the visible callers in this file (ipp_clean_queue_buf)
 * hold c_node->mem_lock around this call to protect the list_del() —
 * confirm before adding new call sites.
 */
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
				m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}
/* drm_pending_event destroy callback: just frees the containing event. */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
/*
 * Allocate and queue the completion event that will be delivered to
 * userspace when the destination buffer in @qbuf finishes.
 *
 * NOTE(review): on allocation failure this refunds sizeof(e->event) to
 * file->event_space although no matching reservation is visible in
 * this file — looks like a leftover from an older event flow; confirm
 * the accounting before relying on it.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;

	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
  629. static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
  630. struct drm_exynos_ipp_queue_buf *qbuf)
  631. {
  632. struct drm_exynos_ipp_send_event *e, *te;
  633. int count = 0;
  634. mutex_lock(&c_node->event_lock);
  635. list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
  636. DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
  637. /*
  638. * qbuf == NULL condition means all event deletion.
  639. * stop operations want to delete all event list.
  640. * another case delete only same buf id.
  641. */
  642. if (!qbuf) {
  643. /* delete list */
  644. list_del(&e->base.link);
  645. kfree(e);
  646. }
  647. /* compare buffer id */
  648. if (qbuf && (qbuf->buf_id ==
  649. e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
  650. /* delete list */
  651. list_del(&e->base.link);
  652. kfree(e);
  653. goto out_unlock;
  654. }
  655. }
  656. out_unlock:
  657. mutex_unlock(&c_node->event_lock);
  658. return;
  659. }
  660. static void ipp_handle_cmd_work(struct device *dev,
  661. struct exynos_drm_ippdrv *ippdrv,
  662. struct drm_exynos_ipp_cmd_work *cmd_work,
  663. struct drm_exynos_ipp_cmd_node *c_node)
  664. {
  665. struct ipp_context *ctx = get_ipp_context(dev);
  666. cmd_work->ippdrv = ippdrv;
  667. cmd_work->c_node = c_node;
  668. queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
  669. }
/*
 * Queue one buffer and, for m2m commands, kick the driver to run.
 *
 * Only acts when the command node is in the START state and all buffers
 * required by the command type are present (ipp_check_mem_list); other
 * cases are bypassed with 0.  For m2m commands the start work is
 * scheduled; WB/OUTPUT commands just program the new buffer address.
 *
 * Returns 0 (including the bypass cases) or a negative error code.
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	/* only a started command touches the hardware here */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If set destination buffer and enabled clock,
	 * then m2m operations need start operations at queue_buf
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
  719. static void ipp_clean_queue_buf(struct drm_device *drm_dev,
  720. struct drm_exynos_ipp_cmd_node *c_node,
  721. struct drm_exynos_ipp_queue_buf *qbuf)
  722. {
  723. struct drm_exynos_ipp_mem_node *m_node, *tm_node;
  724. /* delete list */
  725. mutex_lock(&c_node->mem_lock);
  726. list_for_each_entry_safe(m_node, tm_node,
  727. &c_node->mem_list[qbuf->ops_id], list) {
  728. if (m_node->buf_id == qbuf->buf_id &&
  729. m_node->ops_id == qbuf->ops_id)
  730. ipp_put_mem_node(drm_dev, c_node, m_node);
  731. }
  732. mutex_unlock(&c_node->mem_lock);
  733. }
/*
 * Queue-buffer ioctl handler: enqueue or dequeue one src/dst buffer on
 * the command node identified by qbuf->prop_id.
 *
 * On ENQUEUE of a destination buffer this also allocates the completion
 * event and, via ipp_queue_buf_with_run(), may kick the M2M operation.
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes per-ops arrays below; reject out-of-range values */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	/* unwind the enqueue: drop the memory node(s) we just added */
	DRM_ERROR("clean memory nodes.\n");
	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
  812. static bool exynos_drm_ipp_check_valid(struct device *dev,
  813. enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
  814. {
  815. if (ctrl != IPP_CTRL_PLAY) {
  816. if (pm_runtime_suspended(dev)) {
  817. DRM_ERROR("pm:runtime_suspended.\n");
  818. goto err_status;
  819. }
  820. }
  821. switch (ctrl) {
  822. case IPP_CTRL_PLAY:
  823. if (state != IPP_STATE_IDLE)
  824. goto err_status;
  825. break;
  826. case IPP_CTRL_STOP:
  827. if (state == IPP_STATE_STOP)
  828. goto err_status;
  829. break;
  830. case IPP_CTRL_PAUSE:
  831. if (state != IPP_STATE_START)
  832. goto err_status;
  833. break;
  834. case IPP_CTRL_RESUME:
  835. if (state != IPP_STATE_STOP)
  836. goto err_status;
  837. break;
  838. default:
  839. DRM_ERROR("invalid state.\n");
  840. goto err_status;
  841. }
  842. return true;
  843. err_status:
  844. DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
  845. return false;
  846. }
/*
 * Command-control ioctl handler: drive the PLAY/STOP/PAUSE/RESUME state
 * machine of one command node.
 *
 * PLAY takes a runtime-PM reference if the device is suspended; STOP
 * releases it (pm_runtime_put_sync) once the driver's cmd_list drains.
 * STOP and PAUSE queue the stop work and wait (bounded) for its
 * completion before changing the node state.
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	/* reject transitions that are illegal from the current state */
	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		/* power up the block before starting, if needed */
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		/* bounded wait: proceed even if the stop work times out */
		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		/* drop the PM reference once no commands remain */
		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		/* pause reuses the stop work but keeps the command node */
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
/* Register @nb on the global IPP blocking notifier chain. */
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}
/* Remove @nb from the global IPP blocking notifier chain. */
int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}
/* Broadcast event @val with payload @v to all registered IPP notifiers. */
int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
  949. static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
  950. struct drm_exynos_ipp_property *property)
  951. {
  952. struct exynos_drm_ipp_ops *ops = NULL;
  953. bool swap = false;
  954. int ret, i;
  955. if (!property) {
  956. DRM_ERROR("invalid property parameter.\n");
  957. return -EINVAL;
  958. }
  959. DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
  960. /* reset h/w block */
  961. if (ippdrv->reset &&
  962. ippdrv->reset(ippdrv->dev)) {
  963. return -EINVAL;
  964. }
  965. /* set source,destination operations */
  966. for_each_ipp_ops(i) {
  967. struct drm_exynos_ipp_config *config =
  968. &property->config[i];
  969. ops = ippdrv->ops[i];
  970. if (!ops || !config) {
  971. DRM_ERROR("not support ops and config.\n");
  972. return -EINVAL;
  973. }
  974. /* set format */
  975. if (ops->set_fmt) {
  976. ret = ops->set_fmt(ippdrv->dev, config->fmt);
  977. if (ret)
  978. return ret;
  979. }
  980. /* set transform for rotation, flip */
  981. if (ops->set_transf) {
  982. ret = ops->set_transf(ippdrv->dev, config->degree,
  983. config->flip, &swap);
  984. if (ret)
  985. return ret;
  986. }
  987. /* set size */
  988. if (ops->set_size) {
  989. ret = ops->set_size(ippdrv->dev, swap, &config->pos,
  990. &config->sz);
  991. if (ret)
  992. return ret;
  993. }
  994. }
  995. return 0;
  996. }
/*
 * Start processing for one command node: bind it to the driver,
 * program the property, feed the queued memory nodes to the hardware
 * according to the command type, then call the driver's start hook.
 *
 * M2M consumes only the first src/dst node pair; WB programs every
 * queued destination node; OUTPUT programs every queued source node.
 * Returns 0 on success or a negative error code; on failure the
 * driver's c_node binding is cleared again.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			/* non-empty: guaranteed by ipp_check_mem_list() above */
			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
/*
 * Stop processing for one command node: flush its pending events,
 * release all queued memory nodes for the command type, then call the
 * driver's stop hook.
 *
 * Note the success path of the switch deliberately falls through to
 * the err_clear label — it only unlocks and runs the stop hook.
 * Returns 0 on success or the first ipp_put_mem_node() error.
 */
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	mutex_lock(&c_node->mem_lock);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	mutex_unlock(&c_node->mem_lock);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}
/*
 * Command workqueue handler: the work struct is embedded first in
 * drm_exynos_ipp_cmd_work, hence the cast.
 *
 * PLAY/RESUME start the property (and for M2M wait, bounded, for the
 * transfer to complete); STOP/PAUSE stop it and signal stop_complete
 * so the ioctl path waiting in exynos_drm_ipp_cmd_ctrl() can proceed.
 * The node lock is held for the whole operation.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

	/* success path also falls through here: only the unlock runs */
err_unlock:
	mutex_unlock(&c_node->lock);
}
/*
 * Deliver a buffer-done event to userspace: release the finished
 * memory node(s) for the command type, fill in the oldest queued event
 * with a timestamp and the completed buffer ids, and move it onto the
 * file's event list, waking any poller.
 *
 * buf_id[] holds the hardware-reported buffer ids per ops.  Returns 0
 * on success (including the benign empty-list cases) or a negative
 * error code.  Takes event_lock then mem_lock, in that order.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	/* log only: a mismatch between queued and reported dst ids */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
/*
 * Event workqueue handler: the work struct is embedded first in
 * drm_exynos_ipp_event_work, hence the cast.
 *
 * Forwards the completed buffer ids to ipp_send_event() and, for M2M
 * commands, completes start_complete so the command thread waiting in
 * ipp_sched_cmd() can continue — even on the bypass/error paths.
 */
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP supports command thread, event thread synchronization.
	 * If IPP close immediately from user land, then IPP make
	 * synchronization with command thread, so make complete event.
	 * or going out operations.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

	/* success also reaches here: M2M waiters always get completed */
err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
/*
 * Sub-driver probe: for every registered ipp driver allocate an ipp_id,
 * wire it to the drm device and the shared event workqueue, and attach
 * the iommu when supported.
 *
 * On failure, previously initialised drivers are unwound in reverse
 * (iommu detach + id removal) before returning the error.
 */
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		u32 ipp_id;

		ippdrv->drm_dev = drm_dev;

		/* id 0 is treated as invalid */
		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ipp_id);
		if (ret || ipp_id == 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ipp_id);

		ippdrv->prop_list.ipp_id = ipp_id;

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* unwind every driver initialised before the failing one */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
		drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
			ippdrv->prop_list.ipp_id);
	}

	return ret;
}
  1409. static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
  1410. {
  1411. struct exynos_drm_ippdrv *ippdrv;
  1412. struct ipp_context *ctx = get_ipp_context(dev);
  1413. /* get ipp driver entry */
  1414. list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
  1415. if (is_drm_iommu_supported(drm_dev))
  1416. drm_iommu_detach_device(drm_dev, ippdrv->dev);
  1417. ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
  1418. ippdrv->prop_list.ipp_id);
  1419. ippdrv->drm_dev = NULL;
  1420. exynos_drm_ippdrv_unregister(ippdrv);
  1421. }
  1422. }
  1423. static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
  1424. struct drm_file *file)
  1425. {
  1426. struct drm_exynos_file_private *file_priv = file->driver_priv;
  1427. file_priv->ipp_dev = dev;
  1428. DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
  1429. return 0;
  1430. }
  1431. static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
  1432. struct drm_file *file)
  1433. {
  1434. struct drm_exynos_file_private *file_priv = file->driver_priv;
  1435. struct exynos_drm_ippdrv *ippdrv = NULL;
  1436. struct ipp_context *ctx = get_ipp_context(dev);
  1437. struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
  1438. int count = 0;
  1439. DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
  1440. list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
  1441. mutex_lock(&ippdrv->cmd_lock);
  1442. list_for_each_entry_safe(c_node, tc_node,
  1443. &ippdrv->cmd_list, list) {
  1444. DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
  1445. count++, (int)ippdrv);
  1446. if (c_node->dev == file_priv->ipp_dev) {
  1447. /*
  1448. * userland goto unnormal state. process killed.
  1449. * and close the file.
  1450. * so, IPP didn't called stop cmd ctrl.
  1451. * so, we are make stop operation in this state.
  1452. */
  1453. if (c_node->state == IPP_STATE_START) {
  1454. ipp_stop_property(drm_dev, ippdrv,
  1455. c_node);
  1456. c_node->state = IPP_STATE_STOP;
  1457. }
  1458. ippdrv->dedicated = false;
  1459. ipp_clean_cmd_node(ctx, c_node);
  1460. if (list_empty(&ippdrv->cmd_list))
  1461. pm_runtime_put_sync(ippdrv->dev);
  1462. }
  1463. }
  1464. mutex_unlock(&ippdrv->cmd_lock);
  1465. }
  1466. return;
  1467. }
/*
 * Platform driver probe: allocate the ipp context, create the event
 * and command single-threaded workqueues, and register the exynos drm
 * sub-driver.  devm allocation means ctx needs no explicit free on the
 * error paths; only the workqueues are unwound.
 */
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * create single thread for ipp event
	 * IPP supports event thread for IPP drivers.
	 * IPP driver send event_work to this thread.
	 * and IPP event thread send event to user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * create single thread for ipp command
	 * IPP supports command thread for user process.
	 * user process make command node using set property ioctl.
	 * and make start_work and send this work to command thread.
	 * and then this command thread start property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver informations */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}
  1526. static int ipp_remove(struct platform_device *pdev)
  1527. {
  1528. struct ipp_context *ctx = platform_get_drvdata(pdev);
  1529. /* unregister sub driver */
  1530. exynos_drm_subdrv_unregister(&ctx->subdrv);
  1531. /* remove,destroy ipp idr */
  1532. idr_destroy(&ctx->ipp_idr);
  1533. idr_destroy(&ctx->prop_idr);
  1534. mutex_destroy(&ctx->ipp_lock);
  1535. mutex_destroy(&ctx->prop_lock);
  1536. /* destroy command, event work queue */
  1537. destroy_workqueue(ctx->cmd_workq);
  1538. destroy_workqueue(ctx->event_workq);
  1539. return 0;
  1540. }
/*
 * Power-control stub shared by the system and runtime PM callbacks.
 * Currently only logs the request and always succeeds; ctx is unused.
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}
  1546. #ifdef CONFIG_PM_SLEEP
  1547. static int ipp_suspend(struct device *dev)
  1548. {
  1549. struct ipp_context *ctx = get_ipp_context(dev);
  1550. if (pm_runtime_suspended(dev))
  1551. return 0;
  1552. return ipp_power_ctrl(ctx, false);
  1553. }
  1554. static int ipp_resume(struct device *dev)
  1555. {
  1556. struct ipp_context *ctx = get_ipp_context(dev);
  1557. if (!pm_runtime_suspended(dev))
  1558. return ipp_power_ctrl(ctx, true);
  1559. return 0;
  1560. }
  1561. #endif
  1562. #ifdef CONFIG_PM_RUNTIME
  1563. static int ipp_runtime_suspend(struct device *dev)
  1564. {
  1565. struct ipp_context *ctx = get_ipp_context(dev);
  1566. return ipp_power_ctrl(ctx, false);
  1567. }
  1568. static int ipp_runtime_resume(struct device *dev)
  1569. {
  1570. struct ipp_context *ctx = get_ipp_context(dev);
  1571. return ipp_power_ctrl(ctx, true);
  1572. }
  1573. #endif
/* System-sleep and runtime PM callbacks for the ipp platform driver. */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
/* Platform driver glue for the exynos drm IPP core device. */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};