ipu-common.c
/*
 * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
 * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */
#include <linux/module.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>

#include <drm/drm_fourcc.h>

#include <video/imx-ipu-v3.h>
#include "ipu-prv.h"

static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
{
	return readl(ipu->cm_reg + offset);
}

static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
{
	writel(value, ipu->cm_reg + offset);
}

int ipu_get_num(struct ipu_soc *ipu)
{
	return ipu->id;
}
EXPORT_SYMBOL_GPL(ipu_get_num);

void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
{
	u32 val;

	val = ipu_cm_read(ipu, IPU_SRM_PRI2);
	val &= ~DP_S_SRM_MODE_MASK;
	val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
		      DP_S_SRM_MODE_NOW;
	ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
EXPORT_SYMBOL_GPL(ipu_srm_dp_update);

enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_RGB565_A8:
	case DRM_FORMAT_BGR565_A8:
	case DRM_FORMAT_RGB888_A8:
	case DRM_FORMAT_BGR888_A8:
	case DRM_FORMAT_RGBX8888_A8:
	case DRM_FORMAT_BGRX8888_A8:
		return IPUV3_COLORSPACE_RGB;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);

enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return IPUV3_COLORSPACE_YUV;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB565:
		return IPUV3_COLORSPACE_RGB;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

bool ipu_pixelformat_is_planar(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);

enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
{
	switch (mbus_code & 0xf000) {
	case 0x1000:
		return IPUV3_COLORSPACE_RGB;
	case 0x2000:
		return IPUV3_COLORSPACE_YUV;
	default:
		return IPUV3_COLORSPACE_UNKNOWN;
	}
}
EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);

int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/*
		 * for the planar YUV formats, the stride passed to
		 * cpmem must be the stride in bytes of the Y plane.
		 * And all the planar YUV formats have an 8-bit
		 * Y component.
		 */
		return (8 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
		return (16 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR24:
	case V4L2_PIX_FMT_RGB24:
		return (24 * pixel_stride) >> 3;
	case V4L2_PIX_FMT_BGR32:
	case V4L2_PIX_FMT_RGB32:
		return (32 * pixel_stride) >> 3;
	default:
		break;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
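
/*
 * Usage sketch (not part of the driver): converting a line stride given in
 * pixels into the byte stride that the CPMEM expects. The 1280-pixel width
 * used here is an arbitrary example value.
 *
 *	int stride = ipu_stride_to_bytes(1280, V4L2_PIX_FMT_YUYV);
 *
 *	if (stride < 0)
 *		return stride;		(unsupported pixel format)
 *	stride is now 2560 bytes, since YUYV carries 16 bits per pixel.
 */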
int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	switch (degrees) {
	case 0:
		vf = hf = r90 = 0;
		break;
	case 90:
		vf = hf = 0;
		r90 = 1;
		break;
	case 180:
		vf = hf = 1;
		r90 = 0;
		break;
	case 270:
		vf = hf = r90 = 1;
		break;
	default:
		return -EINVAL;
	}

	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	*mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);

int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
			    bool hflip, bool vflip)
{
	u32 r90, vf, hf;

	r90 = ((u32)mode >> 2) & 0x1;
	hf = ((u32)mode >> 1) & 0x1;
	vf = ((u32)mode >> 0) & 0x1;
	hf ^= (u32)hflip;
	vf ^= (u32)vflip;

	switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
	case IPU_ROTATE_NONE:
		*degrees = 0;
		break;
	case IPU_ROTATE_90_RIGHT:
		*degrees = 90;
		break;
	case IPU_ROTATE_180:
		*degrees = 180;
		break;
	case IPU_ROTATE_90_LEFT:
		*degrees = 270;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
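
/*
 * Usage sketch (not part of the driver): mapping a user-visible rotation of
 * 90 degrees plus a horizontal flip onto the combined IPU rotate mode, and
 * recovering the rotation angle again from that mode.
 *
 *	enum ipu_rotate_mode rot_mode;
 *	int degrees, ret;
 *
 *	ret = ipu_degrees_to_rot_mode(&rot_mode, 90, true, false);
 *	if (ret)
 *		return ret;
 *
 *	ret = ipu_rot_mode_to_degrees(&degrees, rot_mode, true, false);
 *	degrees is 90 again, because the same flips are factored back out.
 */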
struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
{
	struct ipuv3_channel *channel;

	dev_dbg(ipu->dev, "%s %d\n", __func__, num);

	if (num > 63)
		return ERR_PTR(-ENODEV);

	mutex_lock(&ipu->channel_lock);

	list_for_each_entry(channel, &ipu->channels, list) {
		if (channel->num == num) {
			channel = ERR_PTR(-EBUSY);
			goto out;
		}
	}

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		channel = ERR_PTR(-ENOMEM);
		goto out;
	}

	channel->num = num;
	channel->ipu = ipu;
	list_add(&channel->list, &ipu->channels);

out:
	mutex_unlock(&ipu->channel_lock);

	return channel;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get);

void ipu_idmac_put(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;

	dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);

	mutex_lock(&ipu->channel_lock);

	list_del(&channel->list);
	kfree(channel);

	mutex_unlock(&ipu->channel_lock);
}
EXPORT_SYMBOL_GPL(ipu_idmac_put);
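
/*
 * Usage sketch (not part of the driver): claiming an IDMAC channel by
 * number and releasing it again. IPUV3_CHANNEL_MEM_BG_SYNC is one of the
 * channel numbers from <video/imx-ipu-v3.h>; errors follow the ERR_PTR
 * convention used by ipu_idmac_get() above.
 *
 *	struct ipuv3_channel *ch;
 *
 *	ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
 *	if (IS_ERR(ch))
 *		return PTR_ERR(ch);	(-EBUSY if already claimed)
 *
 *	... program the CPMEM, enable the channel, stream ...
 *
 *	ipu_idmac_put(ch);
 */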
#define idma_mask(ch)	(1 << ((ch) & 0x1f))

/*
 * This is an undocumented feature, a write one to a channel bit in
 * IPU_CHA_CUR_BUF and IPU_CHA_TRIPLE_CUR_BUF will reset the channel's
 * internal current buffer pointer so that transfers start from buffer
 * 0 on the next channel enable (that's the theory anyway, the imx6 TRM
 * only says these are read-only registers). This operation is required
 * for channel linking to work correctly, for instance video capture
 * pipelines that carry out image rotations will fail after the first
 * streaming unless this function is called for each channel before
 * re-enabling the channels.
 */
static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
}

void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
		bool doublebuffer)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&ipu->lock, flags);

	reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	if (doublebuffer)
		reg |= idma_mask(channel->num);
	else
		reg &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);

static const struct {
	int chnum;
	u32 reg;
	int shift;
} idmac_lock_en_info[] = {
	{ .chnum = 5, .reg = IDMAC_CH_LOCK_EN_1, .shift = 0, },
	{ .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift = 2, },
	{ .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift = 4, },
	{ .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift = 6, },
	{ .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift = 8, },
	{ .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
	{ .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
	{ .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
	{ .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
	{ .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
	{ .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
	{ .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift = 0, },
	{ .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift = 2, },
	{ .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift = 4, },
	{ .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift = 6, },
	{ .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift = 8, },
	{ .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
};

int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 bursts, regval;
	int i;

	switch (num_bursts) {
	case 0:
	case 1:
		bursts = 0x00; /* locking disabled */
		break;
	case 2:
		bursts = 0x01;
		break;
	case 4:
		bursts = 0x02;
		break;
	case 8:
		bursts = 0x03;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
		if (channel->num == idmac_lock_en_info[i].chnum)
			break;
	}
	if (i >= ARRAY_SIZE(idmac_lock_en_info))
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
	regval &= ~(0x03 << idmac_lock_en_info[i].shift);
	regval |= (bursts << idmac_lock_en_info[i].shift);
	ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
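
/*
 * Usage sketch (not part of the driver): asking for bursts of a lockable
 * channel to be grouped eight at a time. Only the channels listed in
 * idmac_lock_en_info[] are accepted; anything else returns -EINVAL, so the
 * warning path below simply notes that locking is unavailable.
 *
 *	ret = ipu_idmac_lock_enable(channel, 8);
 *	if (ret)
 *		dev_warn(dev, "burst locking not available for channel %d\n",
 *			 channel->num);
 */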
int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val |= IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val |= IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	val = ipu_cm_read(ipu, IPU_CONF);
	val |= mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_enable);

int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
{
	unsigned long lock_flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, lock_flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	val = ipu_cm_read(ipu, IPU_DISP_GEN);

	if (mask & IPU_CONF_DI0_EN)
		val &= ~IPU_DI0_COUNTER_RELEASE;
	if (mask & IPU_CONF_DI1_EN)
		val &= ~IPU_DI1_COUNTER_RELEASE;

	ipu_cm_write(ipu, val, IPU_DISP_GEN);

	spin_unlock_irqrestore(&ipu->lock, lock_flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_module_disable);
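
/*
 * Usage sketch (not part of the driver): gating an IPU submodule around
 * use. The mask bits are the IPU_CONF_*_EN definitions used elsewhere in
 * this file; for the display interfaces the helpers above also release or
 * gate the DI counters in IPU_DISP_GEN.
 *
 *	ipu_module_enable(ipu, IPU_CONF_DI0_EN);
 *	... DI0 in use ...
 *	ipu_module_disable(ipu, IPU_CONF_DI0_EN);
 */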
int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;

	return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);

bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 reg = 0;

	spin_lock_irqsave(&ipu->lock, flags);
	switch (buf_num) {
	case 0:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
		break;
	case 1:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
		break;
	case 2:
		reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
		break;
	}
	spin_unlock_irqrestore(&ipu->lock, flags);

	return ((reg & idma_mask(channel->num)) != 0);
}
EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);

void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Mark buffer as ready. */
	if (buf_num == 0)
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
	else
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
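
/*
 * Usage sketch (not part of the driver): setting up a double-buffered
 * channel. With double buffering enabled the hardware alternates between
 * buffer 0 and buffer 1; software re-arms a buffer by marking it ready
 * again once it has been consumed.
 *
 *	ipu_idmac_set_double_buffer(channel, true);
 *	ipu_idmac_select_buffer(channel, 0);
 *	ipu_idmac_select_buffer(channel, 1);
 *	ipu_idmac_enable_channel(channel);
 *
 *	later, after a buffer has been consumed, re-arm it:
 *	ipu_idmac_select_buffer(channel, buf_num);
 */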
void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned int chno = channel->num;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
	switch (buf_num) {
	case 0:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
		break;
	case 1:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
		break;
	case 2:
		ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
		break;
	default:
		break;
	}
	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);

int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val |= idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);

bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
{
	return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);

int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(ms);
	while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
			idma_mask(channel->num)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);

int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
{
	struct ipu_soc *ipu = channel->ipu;
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&ipu->lock, flags);

	/* Disable DMA channel(s) */
	val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));

	__ipu_idmac_reset_current_buffer(channel);

	/* Set channel buffers NOT to be ready */
	ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */

	if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF0_RDY(channel->num));
	}

	if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
			idma_mask(channel->num)) {
		ipu_cm_write(ipu, idma_mask(channel->num),
			     IPU_CHA_BUF1_RDY(channel->num));
	}

	ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */

	/* Reset the double buffer */
	val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
	val &= ~idma_mask(channel->num);
	ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
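
/*
 * Usage sketch (not part of the driver): the usual teardown order is to
 * wait for the current transfer to finish before clearing the enable bit,
 * so the IDMAC is not stopped mid-burst. The 50 ms timeout is an arbitrary
 * example value.
 *
 *	if (ipu_idmac_wait_busy(channel, 50))
 *		dev_warn(dev, "channel %d did not become idle\n",
 *			 channel->num);
 *	ipu_idmac_disable_channel(channel);
 */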
/*
 * The imx6 rev. D TRM says that enabling the WM feature will increase
 * a channel's priority. Refer to Table 36-8 Calculated priority value.
 * The sub-module that is the sink or source for the channel must enable
 * watermark signal for this to take effect (SMFC_WM for instance).
 */
void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
{
	struct ipu_soc *ipu = channel->ipu;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
	if (enable)
		val |= 1 << (channel->num % 32);
	else
		val &= ~(1 << (channel->num % 32));
	ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);

static int ipu_memory_reset(struct ipu_soc *ipu)
{
	unsigned long timeout;

	ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);

	timeout = jiffies + msecs_to_jiffies(1000);
	while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
		if (time_after(jiffies, timeout))
			return -ETIME;
		cpu_relax();
	}

	return 0;
}

/*
 * Set the source mux for the given CSI. Selects either parallel or
 * MIPI CSI2 sources.
 */
void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
{
	unsigned long flags;
	u32 val, mask;

	mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
			       IPU_CONF_CSI0_DATA_SOURCE;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (mipi_csi2)
		val |= mask;
	else
		val &= ~mask;
	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);

/*
 * Set the source mux for the IC. Selects either CSI[01] or the VDI.
 */
void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&ipu->lock, flags);

	val = ipu_cm_read(ipu, IPU_CONF);
	if (vdi)
		val |= IPU_CONF_IC_INPUT;
	else
		val &= ~IPU_CONF_IC_INPUT;

	if (csi_id == 1)
		val |= IPU_CONF_CSI_SEL;
	else
		val &= ~IPU_CONF_CSI_SEL;

	ipu_cm_write(ipu, val, IPU_CONF);

	spin_unlock_irqrestore(&ipu->lock, flags);
}
EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
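
/*
 * Usage sketch (not part of the driver): routing CSI1 through the VDI
 * de-interlacer into the IC, versus feeding the IC directly from CSI0.
 *
 *	ipu_set_ic_src_mux(ipu, 1, true);	(CSI1 -> VDI -> IC)
 *	ipu_set_ic_src_mux(ipu, 0, false);	(CSI0 -> IC)
 */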
/* Frame Synchronization Unit Channel Linking */

struct fsu_link_reg_info {
	int chno;
	u32 reg;
	u32 mask;
	u32 val;
};

struct fsu_link_info {
	struct fsu_link_reg_info src;
	struct fsu_link_reg_info sink;
};

static const struct fsu_link_info fsu_link_info[] = {
	{
		.src = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
			 FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
		.sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
			  FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
	}, {
		.src = { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
			 FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
		.sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
			  FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
	}, {
		.src = { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
			 FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
		.sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
			  FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
	}, {
		.src = { IPUV3_CHANNEL_CSI_DIRECT, 0 },
		.sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
			  FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
	},
};

static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
		if (src == fsu_link_info[i].src.chno &&
		    sink == fsu_link_info[i].sink.chno)
			return &fsu_link_info[i];
	}

	return NULL;
}

/*
 * Links a source channel to a sink channel in the FSU.
 */
int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		src_reg |= link->src.val;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		sink_reg |= link->sink.val;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_link);

/*
 * Unlinks source and sink channels in the FSU.
 */
int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
{
	const struct fsu_link_info *link;
	u32 src_reg, sink_reg;
	unsigned long flags;

	link = find_fsu_link_info(src_ch, sink_ch);
	if (!link)
		return -EINVAL;

	spin_lock_irqsave(&ipu->lock, flags);

	if (link->src.mask) {
		src_reg = ipu_cm_read(ipu, link->src.reg);
		src_reg &= ~link->src.mask;
		ipu_cm_write(ipu, src_reg, link->src.reg);
	}

	if (link->sink.mask) {
		sink_reg = ipu_cm_read(ipu, link->sink.reg);
		sink_reg &= ~link->sink.mask;
		ipu_cm_write(ipu, sink_reg, link->sink.reg);
	}

	spin_unlock_irqrestore(&ipu->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_fsu_unlink);

/* Link IDMAC channels in the FSU */
int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_link(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_link);

/* Unlink IDMAC channels in the FSU */
int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
{
	return ipu_fsu_unlink(src->ipu, src->num, sink->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
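
/*
 * Usage sketch (not part of the driver): linking the IC viewfinder output
 * channel to the rotator input channel so that the FSU triggers the
 * rotator automatically, as rotation pipelines do. Only the pairs listed
 * in fsu_link_info[] can be linked.
 *
 *	ret = ipu_fsu_link(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
 *			   IPUV3_CHANNEL_MEM_ROT_VF);
 *	...
 *	ipu_fsu_unlink(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
 *		       IPUV3_CHANNEL_MEM_ROT_VF);
 */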
struct ipu_devtype {
	const char *name;
	unsigned long cm_ofs;
	unsigned long cpmem_ofs;
	unsigned long srm_ofs;
	unsigned long tpm_ofs;
	unsigned long csi0_ofs;
	unsigned long csi1_ofs;
	unsigned long ic_ofs;
	unsigned long disp0_ofs;
	unsigned long disp1_ofs;
	unsigned long dc_tmpl_ofs;
	unsigned long vdi_ofs;
	enum ipuv3_type type;
};

static struct ipu_devtype ipu_type_imx51 = {
	.name = "IPUv3EX",
	.cm_ofs = 0x1e000000,
	.cpmem_ofs = 0x1f000000,
	.srm_ofs = 0x1f040000,
	.tpm_ofs = 0x1f060000,
	.csi0_ofs = 0x1f030000,
	.csi1_ofs = 0x1f038000,
	.ic_ofs = 0x1e020000,
	.disp0_ofs = 0x1e040000,
	.disp1_ofs = 0x1e048000,
	.dc_tmpl_ofs = 0x1f080000,
	.vdi_ofs = 0x1e068000,
	.type = IPUV3EX,
};

static struct ipu_devtype ipu_type_imx53 = {
	.name = "IPUv3M",
	.cm_ofs = 0x06000000,
	.cpmem_ofs = 0x07000000,
	.srm_ofs = 0x07040000,
	.tpm_ofs = 0x07060000,
	.csi0_ofs = 0x07030000,
	.csi1_ofs = 0x07038000,
	.ic_ofs = 0x06020000,
	.disp0_ofs = 0x06040000,
	.disp1_ofs = 0x06048000,
	.dc_tmpl_ofs = 0x07080000,
	.vdi_ofs = 0x06068000,
	.type = IPUV3M,
};

static struct ipu_devtype ipu_type_imx6q = {
	.name = "IPUv3H",
	.cm_ofs = 0x00200000,
	.cpmem_ofs = 0x00300000,
	.srm_ofs = 0x00340000,
	.tpm_ofs = 0x00360000,
	.csi0_ofs = 0x00230000,
	.csi1_ofs = 0x00238000,
	.ic_ofs = 0x00220000,
	.disp0_ofs = 0x00240000,
	.disp1_ofs = 0x00248000,
	.dc_tmpl_ofs = 0x00380000,
	.vdi_ofs = 0x00268000,
	.type = IPUV3H,
};

static const struct of_device_id imx_ipu_dt_ids[] = {
	{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
	{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
	{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
	{ .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);

static int ipu_submodules_init(struct ipu_soc *ipu,
		struct platform_device *pdev, unsigned long ipu_base,
		struct clk *ipu_clk)
{
	char *unit;
	int ret;
	struct device *dev = &pdev->dev;
	const struct ipu_devtype *devtype = ipu->devtype;

	ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
	if (ret) {
		unit = "cpmem";
		goto err_cpmem;
	}

	ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
			   IPU_CONF_CSI0_EN, ipu_clk);
	if (ret) {
		unit = "csi0";
		goto err_csi_0;
	}

	ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
			   IPU_CONF_CSI1_EN, ipu_clk);
	if (ret) {
		unit = "csi1";
		goto err_csi_1;
	}

	ret = ipu_ic_init(ipu, dev,
			  ipu_base + devtype->ic_ofs,
			  ipu_base + devtype->tpm_ofs);
	if (ret) {
		unit = "ic";
		goto err_ic;
	}

	ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
			   IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
			   IPU_CONF_IC_INPUT);
	if (ret) {
		unit = "vdi";
		goto err_vdi;
	}

	ret = ipu_image_convert_init(ipu, dev);
	if (ret) {
		unit = "image_convert";
		goto err_image_convert;
	}

	ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
			  IPU_CONF_DI0_EN, ipu_clk);
	if (ret) {
		unit = "di0";
		goto err_di_0;
	}

	ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
			  IPU_CONF_DI1_EN, ipu_clk);
	if (ret) {
		unit = "di1";
		goto err_di_1;
	}

	ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
			  IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
	if (ret) {
		unit = "dc_template";
		goto err_dc;
	}

	ret = ipu_dmfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
	if (ret) {
		unit = "dmfc";
		goto err_dmfc;
	}

	ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
	if (ret) {
		unit = "dp";
		goto err_dp;
	}

	ret = ipu_smfc_init(ipu, dev, ipu_base +
			    devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
	if (ret) {
		unit = "smfc";
		goto err_smfc;
	}

	return 0;

err_smfc:
	ipu_dp_exit(ipu);
err_dp:
	ipu_dmfc_exit(ipu);
err_dmfc:
	ipu_dc_exit(ipu);
err_dc:
	ipu_di_exit(ipu, 1);
err_di_1:
	ipu_di_exit(ipu, 0);
err_di_0:
	ipu_image_convert_exit(ipu);
err_image_convert:
	ipu_vdi_exit(ipu);
err_vdi:
	ipu_ic_exit(ipu);
err_ic:
	ipu_csi_exit(ipu, 1);
err_csi_1:
	ipu_csi_exit(ipu, 0);
err_csi_0:
	ipu_cpmem_exit(ipu);
err_cpmem:
	dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
	return ret;
}

static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
{
	unsigned long status;
	int i, bit, irq;

	for (i = 0; i < num_regs; i++) {
		status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
		status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));

		for_each_set_bit(bit, &status, 32) {
			irq = irq_linear_revmap(ipu->domain,
						regs[i] * 32 + bit);
			if (irq)
				generic_handle_irq(irq);
		}
	}
}

static void ipu_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

static void ipu_err_irq_handler(struct irq_desc *desc)
{
	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	const int int_reg[] = { 4, 5, 8, 9};

	chained_irq_enter(chip, desc);

	ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));

	chained_irq_exit(chip, desc);
}

int ipu_map_irq(struct ipu_soc *ipu, int irq)
{
	int virq;

	virq = irq_linear_revmap(ipu->domain, irq);
	if (!virq)
		virq = irq_create_mapping(ipu->domain, irq);

	return virq;
}
EXPORT_SYMBOL_GPL(ipu_map_irq);

int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
		enum ipu_channel_irq irq_type)
{
	return ipu_map_irq(ipu, irq_type + channel->num);
}
EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
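
/*
 * Usage sketch (not part of the driver): looking up the end-of-frame
 * interrupt of a channel and requesting it. IPU_IRQ_EOF is one of the
 * enum ipu_channel_irq offsets from <video/imx-ipu-v3.h>; the handler name
 * and the priv pointer are placeholders.
 *
 *	int irq = ipu_idmac_channel_irq(ipu, channel, IPU_IRQ_EOF);
 *
 *	ret = devm_request_irq(dev, irq, my_eof_irq_handler, 0,
 *			       "channel-eof", priv);
 */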
static void ipu_submodules_exit(struct ipu_soc *ipu)
{
	ipu_smfc_exit(ipu);
	ipu_dp_exit(ipu);
	ipu_dmfc_exit(ipu);
	ipu_dc_exit(ipu);
	ipu_di_exit(ipu, 1);
	ipu_di_exit(ipu, 0);
	ipu_image_convert_exit(ipu);
	ipu_vdi_exit(ipu);
	ipu_ic_exit(ipu);
	ipu_csi_exit(ipu, 1);
	ipu_csi_exit(ipu, 0);
	ipu_cpmem_exit(ipu);
}

static int platform_remove_devices_fn(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}

static void platform_device_unregister_children(struct platform_device *pdev)
{
	device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
}

struct ipu_platform_reg {
	struct ipu_client_platformdata pdata;
	const char *name;
};

/* These must be in the order of the corresponding device tree port nodes */
static struct ipu_platform_reg client_reg[] = {
	{
		.pdata = {
			.csi = 0,
			.dma[0] = IPUV3_CHANNEL_CSI0,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.csi = 1,
			.dma[0] = IPUV3_CHANNEL_CSI1,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-csi",
	}, {
		.pdata = {
			.di = 0,
			.dc = 5,
			.dp = IPU_DP_FLOW_SYNC_BG,
			.dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
			.dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
		},
		.name = "imx-ipuv3-crtc",
	}, {
		.pdata = {
			.di = 1,
			.dc = 1,
			.dp = -EINVAL,
			.dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
			.dma[1] = -EINVAL,
		},
		.name = "imx-ipuv3-crtc",
	},
};

static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;

static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
{
	struct device *dev = ipu->dev;
	unsigned i;
	int id, ret;

	mutex_lock(&ipu_client_id_mutex);
	id = ipu_client_id;
	ipu_client_id += ARRAY_SIZE(client_reg);
	mutex_unlock(&ipu_client_id_mutex);

	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
		struct ipu_platform_reg *reg = &client_reg[i];
		struct platform_device *pdev;
		struct device_node *of_node;

		/* Associate subdevice with the corresponding port node */
		of_node = of_graph_get_port_by_id(dev->of_node, i);
		if (!of_node) {
			dev_info(dev,
				 "no port@%d node in %pOF, not using %s%d\n",
				 i, dev->of_node,
				 (i / 2) ? "DI" : "CSI", i % 2);
			continue;
		}

		pdev = platform_device_alloc(reg->name, id++);
		if (!pdev) {
			ret = -ENOMEM;
			goto err_register;
		}

		pdev->dev.parent = dev;

		reg->pdata.of_node = of_node;
		ret = platform_device_add_data(pdev, &reg->pdata,
					       sizeof(reg->pdata));
		if (!ret)
			ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			goto err_register;
		}
	}

	return 0;

err_register:
	platform_device_unregister_children(to_platform_device(dev));

	return ret;
}

static int ipu_irq_init(struct ipu_soc *ipu)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned long unused[IPU_NUM_IRQS / 32] = {
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x400100d0, 0xffe000fd,
		0x4077ffff, 0xffe7e1fd,
		0x23fffffe, 0x8880fff0,
		0xf98fe7d0, 0xfff81fff,
		0x400100d0, 0xffe000fd,
		0x00000000,
	};
	int ret, i;

	ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
					    &irq_generic_chip_ops, ipu);
	if (!ipu->domain) {
		dev_err(ipu->dev, "failed to add irq domain\n");
		return -ENODEV;
	}

	ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
					     handle_level_irq, 0, 0, 0);
	if (ret < 0) {
		dev_err(ipu->dev, "failed to alloc generic irq chips\n");
		irq_domain_remove(ipu->domain);
		return ret;
	}

	/* Mask and clear all interrupts */
	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
		ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
	}

	for (i = 0; i < IPU_NUM_IRQS; i += 32) {
		gc = irq_get_domain_generic_chip(ipu->domain, i);
		gc->reg_base = ipu->cm_reg;
		gc->unused = unused[i / 32];
		ct = gc->chip_types;
		ct->chip.irq_ack = irq_gc_ack_set_bit;
		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->regs.ack = IPU_INT_STAT(i / 32);
		ct->regs.mask = IPU_INT_CTRL(i / 32);
	}

	irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
	irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
					 ipu);

	return 0;
}

static void ipu_irq_exit(struct ipu_soc *ipu)
{
	int i, irq;

	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
	irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);

	/* TODO: remove irq_domain_generic_chips */

	for (i = 0; i < IPU_NUM_IRQS; i++) {
		irq = irq_linear_revmap(ipu->domain, i);
		if (irq)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(ipu->domain);
}

void ipu_dump(struct ipu_soc *ipu)
{
	int i;

	dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CONF));
	dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CONF));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
	dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
	dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
		ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
	dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
	dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
	dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
		ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
	for (i = 0; i < 15; i++)
		dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
			ipu_cm_read(ipu, IPU_INT_CTRL(i)));
}
EXPORT_SYMBOL_GPL(ipu_dump);

static int ipu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ipu_soc *ipu;
	struct resource *res;
	unsigned long ipu_base;
	int ret, irq_sync, irq_err;
	const struct ipu_devtype *devtype;

	devtype = of_device_get_match_data(&pdev->dev);
	if (!devtype)
		return -EINVAL;

	irq_sync = platform_get_irq(pdev, 0);
	irq_err = platform_get_irq(pdev, 1);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
		irq_sync, irq_err);

	if (!res || irq_sync < 0 || irq_err < 0)
		return -ENODEV;

	ipu_base = res->start;

	ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
	if (!ipu)
		return -ENODEV;

	ipu->id = of_alias_get_id(np, "ipu");

	if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
	    IS_ENABLED(CONFIG_DRM)) {
		ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
							  "fsl,prg", ipu->id);
		if (!ipu->prg_priv)
			return -EPROBE_DEFER;
	}

	ipu->devtype = devtype;
	ipu->ipu_type = devtype->type;

	spin_lock_init(&ipu->lock);
	mutex_init(&ipu->channel_lock);
	INIT_LIST_HEAD(&ipu->channels);

	dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
		ipu_base + devtype->cm_ofs);
	dev_dbg(&pdev->dev, "idmac: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
	dev_dbg(&pdev->dev, "cpmem: 0x%08lx\n",
		ipu_base + devtype->cpmem_ofs);
	dev_dbg(&pdev->dev, "csi0: 0x%08lx\n",
		ipu_base + devtype->csi0_ofs);
	dev_dbg(&pdev->dev, "csi1: 0x%08lx\n",
		ipu_base + devtype->csi1_ofs);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->ic_ofs);
	dev_dbg(&pdev->dev, "disp0: 0x%08lx\n",
		ipu_base + devtype->disp0_ofs);
	dev_dbg(&pdev->dev, "disp1: 0x%08lx\n",
		ipu_base + devtype->disp1_ofs);
	dev_dbg(&pdev->dev, "srm: 0x%08lx\n",
		ipu_base + devtype->srm_ofs);
	dev_dbg(&pdev->dev, "tpm: 0x%08lx\n",
		ipu_base + devtype->tpm_ofs);
	dev_dbg(&pdev->dev, "dc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
	dev_dbg(&pdev->dev, "ic: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
	dev_dbg(&pdev->dev, "dmfc: 0x%08lx\n",
		ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
	dev_dbg(&pdev->dev, "vdi: 0x%08lx\n",
		ipu_base + devtype->vdi_ofs);

	ipu->cm_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs, PAGE_SIZE);
	ipu->idmac_reg = devm_ioremap(&pdev->dev,
			ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
			PAGE_SIZE);
	if (!ipu->cm_reg || !ipu->idmac_reg)
		return -ENOMEM;

	ipu->clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(ipu->clk)) {
		ret = PTR_ERR(ipu->clk);
		dev_err(&pdev->dev, "clk_get failed with %d", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ipu);

	ret = clk_prepare_enable(ipu->clk);
	if (ret) {
		dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	ipu->dev = &pdev->dev;
	ipu->irq_sync = irq_sync;
	ipu->irq_err = irq_err;

	ret = device_reset(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
		goto out_failed_reset;
	}
	ret = ipu_memory_reset(ipu);
	if (ret)
		goto out_failed_reset;

	ret = ipu_irq_init(ipu);
	if (ret)
		goto out_failed_irq;

	/* Set MCU_T to divide MCU access window into 2 */
	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
		     IPU_DISP_GEN);

	ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
	if (ret)
		goto failed_submodules_init;

	ret = ipu_add_client_devices(ipu, ipu_base);
	if (ret) {
		dev_err(&pdev->dev, "adding client devices failed with %d\n",
			ret);
		goto failed_add_clients;
	}

	dev_info(&pdev->dev, "%s probed\n", devtype->name);

	return 0;

failed_add_clients:
	ipu_submodules_exit(ipu);
failed_submodules_init:
	ipu_irq_exit(ipu);
out_failed_irq:
out_failed_reset:
	clk_disable_unprepare(ipu->clk);
	return ret;
}

static int ipu_remove(struct platform_device *pdev)
{
	struct ipu_soc *ipu = platform_get_drvdata(pdev);

	platform_device_unregister_children(pdev);
	ipu_submodules_exit(ipu);
	ipu_irq_exit(ipu);

	clk_disable_unprepare(ipu->clk);

	return 0;
}

static struct platform_driver imx_ipu_driver = {
	.driver = {
		.name = "imx-ipuv3",
		.of_match_table = imx_ipu_dt_ids,
	},
	.probe = ipu_probe,
	.remove = ipu_remove,
};

static struct platform_driver * const drivers[] = {
#if IS_ENABLED(CONFIG_DRM)
	&ipu_pre_drv,
	&ipu_prg_drv,
#endif
	&imx_ipu_driver,
};

static int __init imx_ipu_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(imx_ipu_init);

static void __exit imx_ipu_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(imx_ipu_exit);

MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");