/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing. It supports image scaler,
 * rotator and input/output DMA operations implemented by hardware
 * blocks such as FIMC, GSC and Rotator.
 * IPP is an umbrella driver that integrates these devices, which all
 * share the same set of attributes, behind one interface.
 */
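
/*
 * A minimal sketch of the userspace side of this interface, assuming
 * the IPP ioctls and structures from <drm/exynos_drm.h>; error handling
 * is omitted and all field values are illustrative only:
 *
 *	struct drm_exynos_ipp_property prop = { .cmd = IPP_CMD_M2M, ... };
 *	struct drm_exynos_ipp_queue_buf qbuf = { ... };
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = { ... };
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *	qbuf.prop_id = prop.prop_id;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	ctrl.prop_id = prop.prop_id;
 *	ctrl.ctrl = IPP_CTRL_PLAY;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *
 * Completion of each frame is then delivered as a DRM_EXYNOS_IPP_EVENT
 * on the DRM file descriptor (see ipp_send_event() below).
 */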

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open handling.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	((c) == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * An ipp event structure.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A memory node structure.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};

/*
 * An ipp context structure.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}

static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
			    struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver that owns the given prop_id handle.
	 * The ipp subsystem sometimes needs to look a driver up by
	 * prop_id, e.g. for the PAUSE state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the number of registered ipp drivers to the user
		 * application: in the first step the application queries
		 * the ippdrv count, and in the second step it queries each
		 * driver's capabilities by ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the capabilities of the driver named by ipp_id.
		 * Some devices do not support the writeback or output
		 * interfaces, so the user application uses this ioctl to
		 * pick the right ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
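
/*
 * A minimal sketch of the two-step discovery described above, assuming
 * the DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY ioctl from <drm/exynos_drm.h>
 * and that ipp ids are handed out sequentially from 1 (idr_alloc() is
 * called with start == 1 here); error handling is omitted:
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	__u32 i, count;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	count = plist.count;		(step 1: driver count)
 *	for (i = 1; i <= count; i++) {
 *		plist.ipp_id = i;	(step 2: per-driver capabilities)
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	}
 */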

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	prop_id = property->prop_id;

	/*
	 * Log the property handed in by the user application;
	 * applications set a variety of properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * If prop_id is non-zero, try to update an existing property.
	 */
	if (prop_id) {
		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);

		if (!c_node || c_node->filp != file) {
			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
			return -EINVAL;
		}

		if (c_node->state != IPP_STATE_STOP) {
			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
			return -EINVAL;
		}

		c_node->property = *property;

		return 0;
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
	if (ret < 0) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}
	property->prop_id = ret;

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;
	c_node->filp = file;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}
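
/*
 * A minimal sketch of a property for an M2M downscaling job, assuming
 * the structures from <drm/exynos_drm.h>; the format and sizes below
 * are illustrative only:
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,
 *		.ipp_id = 0,	(0 lets ipp_find_driver() pick any driver)
 *		.config[EXYNOS_DRM_OPS_SRC] = {
 *			.ops_id = EXYNOS_DRM_OPS_SRC,
 *			.fmt = DRM_FORMAT_XRGB8888,
 *			.sz = { .hsize = 1280, .vsize = 720 },
 *			.pos = { .x = 0, .y = 0, .w = 1280, .h = 720 },
 *		},
 *		.config[EXYNOS_DRM_OPS_DST] = {
 *			.ops_id = EXYNOS_DRM_OPS_DST,
 *			.fmt = DRM_FORMAT_XRGB8888,
 *			.sz = { .hsize = 640, .vsize = 360 },
 *			.pos = { .x = 0, .y = 0, .w = 640, .h = 360 },
 *		},
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 * On success the kernel writes the allocated handle back into
 * prop.prop_id for use with the queue_buf and cmd_ctrl ioctls.
 */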

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							c_node->filp);
	}

	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;
	INIT_LIST_HEAD(&m_node->list);

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], c_node->filp);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				ipp_put_mem_node(drm_dev, c_node, m_node);
				return ERR_PTR(-EFAULT);
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				      buf_info->base[i], buf_info->handles[i]);
		}
	}

	mutex_lock(&c_node->mem_lock);
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;
}

static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct list_head *head = &c_node->mem_list[ops];

	mutex_lock(&c_node->mem_lock);

	list_for_each_entry_safe(m_node, tm_node, head, list) {
		int ret;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
	}

	mutex_unlock(&c_node->mem_lock);
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		c_node->filp->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = c_node->filp;
	e->base.destroy = ipp_free_event;

	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * A NULL qbuf means delete all events: the stop operation
		 * wants the whole event list cleared. Otherwise delete
		 * only the event with the matching buffer id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
	return;
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
				struct drm_exynos_ipp_cmd_node *c_node)
{
	int i;

	/* cancel works */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* put event */
	ipp_put_event(c_node, NULL);

	for_each_ipp_ops(i)
		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	case IPP_CMD_M2M:
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			return ret;
		}
	}

	return ret;
}

static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, &cmd_work->work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf time.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer, then,
		 * in the M2M case, run with the destination buffer if
		 * needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control here for the
			 * streaming feature; the other cases just set the
			 * address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");
	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
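
/*
 * A minimal sketch of enqueueing one source and one destination GEM
 * buffer for an M2M property, assuming prop.prop_id holds the handle
 * returned by set_property and that src_handle/dst_handle are GEM
 * handles owned by this file (both names are illustrative):
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.prop_id = prop.prop_id,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *	};
 *
 *	qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
 *	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = src_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 *	qbuf.ops_id = EXYNOS_DRM_OPS_DST;
 *	qbuf.handle[EXYNOS_DRM_PLANAR_Y] = dst_handle;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *
 * Queueing the destination buffer also allocates the completion event
 * and, for M2M, schedules the start work (see above).
 */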

static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
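
/*
 * Summary of the checks above: each control is accepted only from the
 * state(s) listed, everything else is rejected.
 *
 *	IPP_CTRL_PLAY:   IDLE only
 *	IPP_CTRL_STOP:   any state except STOP
 *	IPP_CTRL_PAUSE:  START only
 *	IPP_CTRL_RESUME: STOP only
 *
 * For every control except PLAY the device must also be runtime-active.
 */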

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
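
/*
 * A minimal sketch of driving a job and consuming its completion event
 * from userspace, assuming prop.prop_id from set_property; the read()
 * call and buffer size are illustrative, and real code would poll() the
 * fd before reading:
 *
 *	struct drm_exynos_ipp_cmd_ctrl ctrl = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *	char buf[4096];
 *	struct drm_exynos_ipp_event *ev;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 *	read(fd, buf, sizeof(buf));
 *	ev = (struct drm_exynos_ipp_event *)buf;
 *	(ev->buf_id[EXYNOS_DRM_OPS_DST] identifies the finished buffer)
 *
 *	ctrl.ctrl = IPP_CTRL_STOP;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
 */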

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}
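
/*
 * A minimal sketch of how another in-kernel driver could subscribe to
 * ipp notifications through the chain above; the callback name and its
 * interpretation of val/data are hypothetical:
 *
 *	static int my_ipp_notifier(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		(react to the ipp event carried in val/data)
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ipp_notifier,
 *	};
 *
 *	exynos_drm_ippnb_register(&my_nb);
 *	...
 *	exynos_drm_ippnb_unregister(&my_nb);
 */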

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		return -EINVAL;
	}

	/* set source, destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret)
				return ret;
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret)
				return ret;
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	int i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i)
			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	return 0;
}

void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * M2M performs a single unit of operation at a time on
		 * possibly multiple queues, so it has to wait until the
		 * data transfer has finished.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}

static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * Whenever a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list, so
	 * the first event corresponds to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		container_of(work, struct drm_exynos_ipp_event_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command thread with the event thread.
	 * If userspace closes the device immediately, IPP still has to
	 * synchronize with the command thread, so it completes the start
	 * event here instead of continuing the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* get ipp driver entry */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->filp == file) {
				/*
				 * Userland entered an abnormal state,
				 * e.g. the process was killed and its file
				 * closed, so the stop cmd ctrl was never
				 * issued to IPP. Perform the stop
				 * operation here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP runs an event thread on behalf of the IPP drivers:
	 * an IPP driver sends its event_work to this thread,
	 * and the event thread delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP runs a command thread on behalf of user processes:
	 * a user process creates a command node via the set property
	 * ioctl, builds a start_work, and sends it to the command
	 * thread, which then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy the ipp idrs */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queues */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};