dpu_encoder.c

  1. /*
  2. * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <robdclark@gmail.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  19. #include <linux/kthread.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/seq_file.h>
  22. #include "msm_drv.h"
  23. #include "dpu_kms.h"
  24. #include <drm/drm_crtc.h>
  25. #include <drm/drm_crtc_helper.h>
  26. #include "dpu_hwio.h"
  27. #include "dpu_hw_catalog.h"
  28. #include "dpu_hw_intf.h"
  29. #include "dpu_hw_ctl.h"
  30. #include "dpu_formats.h"
  31. #include "dpu_encoder_phys.h"
  32. #include "dpu_crtc.h"
  33. #include "dpu_trace.h"
  34. #include "dpu_core_irq.h"
  35. #define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
  36. (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  37. #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
  38. (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  39. #define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
  40. (p) ? (p)->parent->base.id : -1, \
  41. (p) ? (p)->intf_idx - INTF_0 : -1, \
  42. (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  43. ##__VA_ARGS__)
  44. #define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
  45. (p) ? (p)->parent->base.id : -1, \
  46. (p) ? (p)->intf_idx - INTF_0 : -1, \
  47. (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  48. ##__VA_ARGS__)
  49. /*
  50. * Two to anticipate panels that can do cmd/vid dynamic switching
  51. * plan is to create all possible physical encoder types, and switch between
  52. * them at runtime
  53. */
  54. #define NUM_PHYS_ENCODER_TYPES 2
  55. #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
  56. (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
  57. #define MAX_CHANNELS_PER_ENC 2
  58. #define IDLE_SHORT_TIMEOUT 1
  59. #define MAX_VDISPLAY_SPLIT 1080
  60. /**
  61. * enum dpu_enc_rc_events - events for resource control state machine
  62. * @DPU_ENC_RC_EVENT_KICKOFF:
  63. * This event happens at NORMAL priority.
  64. * Event that signals the start of the transfer. When this event is
  65. * received, enable MDP/DSI core clocks. Regardless of the previous
  66. * state, the resource should be in ON state at the end of this event.
  67. * @DPU_ENC_RC_EVENT_FRAME_DONE:
  68. * This event happens at INTERRUPT level.
  69. * Event signals the end of the data transfer after the PP FRAME_DONE
  70. * event. At the end of this event, a delayed work is scheduled to go to
  71. * IDLE_PC state after IDLE_TIMEOUT time.
  72. * @DPU_ENC_RC_EVENT_PRE_STOP:
  73. * This event happens at NORMAL priority.
  74. * This event, when received during the ON state, leaves the RC state
  75. * in the PRE_OFF state. It should be followed by the STOP event as
  76. * part of encoder disable.
  77. * If received during IDLE or OFF states, it will do nothing.
  78. * @DPU_ENC_RC_EVENT_STOP:
  79. * This event happens at NORMAL priority.
  80. * When this event is received, disable all the MDP/DSI core clocks, and
  81. * disable IRQs. It should be called from the PRE_OFF or IDLE states.
  82. * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
  83. * PRE_OFF is expected when PRE_STOP was executed during the ON state.
  84. * Resource state should be in OFF at the end of the event.
  85. * @DPU_ENC_RC_EVENT_ENTER_IDLE:
  86. * This event happens at NORMAL priority from a work item.
  87. * Event signals that there were no frame updates for IDLE_TIMEOUT time.
  88. * This would disable MDP/DSI core clocks and change the resource state
  89. * to IDLE.
  90. */
  91. enum dpu_enc_rc_events {
  92. DPU_ENC_RC_EVENT_KICKOFF = 1,
  93. DPU_ENC_RC_EVENT_FRAME_DONE,
  94. DPU_ENC_RC_EVENT_PRE_STOP,
  95. DPU_ENC_RC_EVENT_STOP,
  96. DPU_ENC_RC_EVENT_ENTER_IDLE
  97. };
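/*
 * Typical resource-control sequence, per the event descriptions above:
 * KICKOFF turns clocks/IRQs on (state ON), FRAME_DONE schedules the delayed
 * idle work, ENTER_IDLE drops them again (state IDLE), and PRE_STOP followed
 * by STOP during encoder disable brings the state machine back to OFF.
 */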
  98. /*
  99. * enum dpu_enc_rc_states - states that the resource control maintains
  100. * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
  101. * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
  102. * @DPU_ENC_RC_STATE_ON: Resource is in ON state
  103. * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state
  104. * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
  105. */
  106. enum dpu_enc_rc_states {
  107. DPU_ENC_RC_STATE_OFF,
  108. DPU_ENC_RC_STATE_PRE_OFF,
  109. DPU_ENC_RC_STATE_ON,
  110. DPU_ENC_RC_STATE_IDLE
  111. };
  112. /**
  113. * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
  114. * encoders. Virtual encoder manages one "logical" display. Physical
  115. * encoders manage one intf block, tied to a specific panel/sub-panel.
  116. * Virtual encoder defers as much as possible to the physical encoders.
  117. * Virtual encoder registers itself with the DRM Framework as the encoder.
  118. * @base: drm_encoder base class for registration with DRM
  119. * @enc_spinlock: Virtual-encoder-wide spinlock for IRQ purposes
  120. * @bus_scaling_client: Client handle to the bus scaling interface
  121. * @num_phys_encs: Actual number of physical encoders contained.
  122. * @phys_encs: Container of physical encoders managed.
  123. * @cur_master: Pointer to the current master in this mode. Optimization;
  124. * only valid after enable, cleared at disable.
  125. * @hw_pp: Handle to the pingpong blocks used for the display. The number of
  126. * pingpong blocks can differ from num_phys_encs.
  127. * @intfs_swapped Whether or not the phys_enc interfaces have been swapped
  128. * for partial update right-only cases, such as pingpong
  129. * split where virtual pingpong does not generate IRQs
  130. * @crtc_vblank_cb: Callback into the upper layer / CRTC for
  131. * notification of the VBLANK
  132. * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
  133. * @crtc_kickoff_cb: Callback into CRTC that will flush & start
  134. * all CTL paths
  135. * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
  136. * @debugfs_root: Debug file system root file node
  137. * @enc_lock: Lock around physical encoder create/destroy and
  138. * access.
  139. * @frame_busy_mask: Bitmask tracking which phys_encs are still
  140. * busy processing the current command.
  141. * Bit0 = phys_encs[0] etc.
  142. * @crtc_frame_event_cb: callback handler for frame event
  143. * @crtc_frame_event_cb_data: callback handler private data
  144. * @frame_done_timeout: frame done timeout in Hz
  145. * @frame_done_timer: watchdog timer for frame done event
  146. * @vsync_event_timer: vsync timer
  147. * @disp_info: local copy of msm_display_info struct
  148. * @idle_pc_supported: indicates if idle power collapse is supported
  149. * @rc_lock: resource control mutex lock to protect
  150. * virt encoder over various state changes
  151. * @rc_state: resource controller state
  152. * @delayed_off_work: delayed worker to schedule disabling of
  153. * clks and resources after IDLE_TIMEOUT time.
  154. * @vsync_event_work: worker to handle vsync event for autorefresh
  155. * @topology: topology of the display
  156. * @mode_set_complete: flag to indicate modeset completion
  157. * @idle_timeout: idle timeout duration in milliseconds
  158. */
  159. struct dpu_encoder_virt {
  160. struct drm_encoder base;
  161. spinlock_t enc_spinlock;
  162. uint32_t bus_scaling_client;
  163. unsigned int num_phys_encs;
  164. struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
  165. struct dpu_encoder_phys *cur_master;
  166. struct dpu_encoder_phys *cur_slave;
  167. struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
  168. bool intfs_swapped;
  169. void (*crtc_vblank_cb)(void *);
  170. void *crtc_vblank_cb_data;
  171. struct dentry *debugfs_root;
  172. struct mutex enc_lock;
  173. DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
  174. void (*crtc_frame_event_cb)(void *, u32 event);
  175. void *crtc_frame_event_cb_data;
  176. atomic_t frame_done_timeout;
  177. struct timer_list frame_done_timer;
  178. struct timer_list vsync_event_timer;
  179. struct msm_display_info disp_info;
  180. bool idle_pc_supported;
  181. struct mutex rc_lock;
  182. enum dpu_enc_rc_states rc_state;
  183. struct kthread_delayed_work delayed_off_work;
  184. struct kthread_work vsync_event_work;
  185. struct msm_display_topology topology;
  186. bool mode_set_complete;
  187. u32 idle_timeout;
  188. };
  189. #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
  190. static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
  191. bool enable)
  192. {
  193. struct drm_encoder *drm_enc;
  194. struct msm_drm_private *priv;
  195. struct dpu_kms *dpu_kms;
  196. if (!dpu_enc) {
  197. DPU_ERROR("invalid dpu enc\n");
  198. return -EINVAL;
  199. }
  200. drm_enc = &dpu_enc->base;
  201. if (!drm_enc->dev || !drm_enc->dev->dev_private) {
  202. DPU_ERROR("drm device invalid\n");
  203. return -EINVAL;
  204. }
  205. priv = drm_enc->dev->dev_private;
  206. if (!priv->kms) {
  207. DPU_ERROR("invalid kms\n");
  208. return -EINVAL;
  209. }
  210. dpu_kms = to_dpu_kms(priv->kms);
  211. if (enable)
  212. pm_runtime_get_sync(&dpu_kms->pdev->dev);
  213. else
  214. pm_runtime_put_sync(&dpu_kms->pdev->dev);
  215. return 0;
  216. }
  217. void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
  218. enum dpu_intr_idx intr_idx)
  219. {
  220. DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
  221. DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
  222. phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
  223. if (phys_enc->parent_ops->handle_frame_done)
  224. phys_enc->parent_ops->handle_frame_done(
  225. phys_enc->parent, phys_enc,
  226. DPU_ENCODER_FRAME_EVENT_ERROR);
  227. }
  228. static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
  229. int32_t hw_id, struct dpu_encoder_wait_info *info);
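/*
 * Block until the interrupt described by intr_idx fires; on timeout the raw
 * IRQ status is re-read and, if set, the callback is run manually so a missed
 * interrupt does not stall the caller.
 */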
  230. int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
  231. enum dpu_intr_idx intr_idx,
  232. struct dpu_encoder_wait_info *wait_info)
  233. {
  234. struct dpu_encoder_irq *irq;
  235. u32 irq_status;
  236. int ret;
  237. if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
  238. DPU_ERROR("invalid params\n");
  239. return -EINVAL;
  240. }
  241. irq = &phys_enc->irq[intr_idx];
  242. /* note: do master / slave checking outside */
  243. /* return EWOULDBLOCK since we know the wait isn't necessary */
  244. if (phys_enc->enable_state == DPU_ENC_DISABLED) {
  245. DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
  246. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  247. irq->irq_idx);
  248. return -EWOULDBLOCK;
  249. }
  250. if (irq->irq_idx < 0) {
  251. DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
  252. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  253. irq->name);
  254. return 0;
  255. }
  256. DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
  257. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  258. irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
  259. atomic_read(wait_info->atomic_cnt));
  260. ret = dpu_encoder_helper_wait_event_timeout(
  261. DRMID(phys_enc->parent),
  262. irq->hw_idx,
  263. wait_info);
  264. if (ret <= 0) {
  265. irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
  266. irq->irq_idx, true);
  267. if (irq_status) {
  268. unsigned long flags;
  269. DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
  270. "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
  271. DRMID(phys_enc->parent), intr_idx,
  272. irq->hw_idx, irq->irq_idx,
  273. phys_enc->hw_pp->idx - PINGPONG_0,
  274. atomic_read(wait_info->atomic_cnt));
  275. local_irq_save(flags);
  276. irq->cb.func(phys_enc, irq->irq_idx);
  277. local_irq_restore(flags);
  278. ret = 0;
  279. } else {
  280. ret = -ETIMEDOUT;
  281. DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
  282. "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
  283. DRMID(phys_enc->parent), intr_idx,
  284. irq->hw_idx, irq->irq_idx,
  285. phys_enc->hw_pp->idx - PINGPONG_0,
  286. atomic_read(wait_info->atomic_cnt));
  287. }
  288. } else {
  289. ret = 0;
  290. trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
  291. intr_idx, irq->hw_idx, irq->irq_idx,
  292. phys_enc->hw_pp->idx - PINGPONG_0,
  293. atomic_read(wait_info->atomic_cnt));
  294. }
  295. return ret;
  296. }
  297. int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
  298. enum dpu_intr_idx intr_idx)
  299. {
  300. struct dpu_encoder_irq *irq;
  301. int ret = 0;
  302. if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
  303. DPU_ERROR("invalid params\n");
  304. return -EINVAL;
  305. }
  306. irq = &phys_enc->irq[intr_idx];
  307. if (irq->irq_idx >= 0) {
  308. DPU_DEBUG_PHYS(phys_enc,
  309. "skipping already registered irq %s type %d\n",
  310. irq->name, irq->intr_type);
  311. return 0;
  312. }
  313. irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
  314. irq->intr_type, irq->hw_idx);
  315. if (irq->irq_idx < 0) {
  316. DPU_ERROR_PHYS(phys_enc,
  317. "failed to lookup IRQ index for %s type:%d\n",
  318. irq->name, irq->intr_type);
  319. return -EINVAL;
  320. }
  321. ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
  322. &irq->cb);
  323. if (ret) {
  324. DPU_ERROR_PHYS(phys_enc,
  325. "failed to register IRQ callback for %s\n",
  326. irq->name);
  327. irq->irq_idx = -EINVAL;
  328. return ret;
  329. }
  330. ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
  331. if (ret) {
  332. DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
  333. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  334. irq->irq_idx);
  335. dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
  336. irq->irq_idx, &irq->cb);
  337. irq->irq_idx = -EINVAL;
  338. return ret;
  339. }
  340. trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
  341. irq->hw_idx, irq->irq_idx);
  342. return ret;
  343. }
  344. int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
  345. enum dpu_intr_idx intr_idx)
  346. {
  347. struct dpu_encoder_irq *irq;
  348. int ret;
  349. if (!phys_enc) {
  350. DPU_ERROR("invalid encoder\n");
  351. return -EINVAL;
  352. }
  353. irq = &phys_enc->irq[intr_idx];
  354. /* silently skip irqs that weren't registered */
  355. if (irq->irq_idx < 0) {
  356. DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
  357. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  358. irq->irq_idx);
  359. return 0;
  360. }
  361. ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
  362. if (ret) {
  363. DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
  364. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  365. irq->irq_idx, ret);
  366. }
  367. ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
  368. &irq->cb);
  369. if (ret) {
  370. DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
  371. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  372. irq->irq_idx, ret);
  373. }
  374. trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
  375. irq->hw_idx, irq->irq_idx);
  376. irq->irq_idx = -EINVAL;
  377. return 0;
  378. }
  379. void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
  380. struct dpu_encoder_hw_resources *hw_res)
  381. {
  382. struct dpu_encoder_virt *dpu_enc = NULL;
  383. int i = 0;
  384. dpu_enc = to_dpu_encoder_virt(drm_enc);
  385. DPU_DEBUG_ENC(dpu_enc, "\n");
  386. /* Query resources used by phys encs, expected to be without overlap */
  387. memset(hw_res, 0, sizeof(*hw_res));
  388. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  389. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  390. if (phys && phys->ops.get_hw_resources)
  391. phys->ops.get_hw_resources(phys, hw_res);
  392. }
  393. }
  394. static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
  395. {
  396. struct dpu_encoder_virt *dpu_enc = NULL;
  397. int i = 0;
  398. if (!drm_enc) {
  399. DPU_ERROR("invalid encoder\n");
  400. return;
  401. }
  402. dpu_enc = to_dpu_encoder_virt(drm_enc);
  403. DPU_DEBUG_ENC(dpu_enc, "\n");
  404. mutex_lock(&dpu_enc->enc_lock);
  405. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  406. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  407. if (phys && phys->ops.destroy) {
  408. phys->ops.destroy(phys);
  409. --dpu_enc->num_phys_encs;
  410. dpu_enc->phys_encs[i] = NULL;
  411. }
  412. }
  413. if (dpu_enc->num_phys_encs)
  414. DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
  415. dpu_enc->num_phys_encs);
  416. dpu_enc->num_phys_encs = 0;
  417. mutex_unlock(&dpu_enc->enc_lock);
  418. drm_encoder_cleanup(drm_enc);
  419. mutex_destroy(&dpu_enc->enc_lock);
  420. }
  421. void dpu_encoder_helper_split_config(
  422. struct dpu_encoder_phys *phys_enc,
  423. enum dpu_intf interface)
  424. {
  425. struct dpu_encoder_virt *dpu_enc;
  426. struct split_pipe_cfg cfg = { 0 };
  427. struct dpu_hw_mdp *hw_mdptop;
  428. struct msm_display_info *disp_info;
  429. if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
  430. DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
  431. return;
  432. }
  433. dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
  434. hw_mdptop = phys_enc->hw_mdptop;
  435. disp_info = &dpu_enc->disp_info;
  436. if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
  437. return;
  438. /**
  439. * disable split modes since the encoder will be operating as the only
  440. * encoder, either for the entire use case (for example, single DSI),
  441. * or for this frame in the case of a left/right-only partial
  442. * update.
  443. */
  444. if (phys_enc->split_role == ENC_ROLE_SOLO) {
  445. if (hw_mdptop->ops.setup_split_pipe)
  446. hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
  447. return;
  448. }
  449. cfg.en = true;
  450. cfg.mode = phys_enc->intf_mode;
  451. cfg.intf = interface;
  452. if (cfg.en && phys_enc->ops.needs_single_flush &&
  453. phys_enc->ops.needs_single_flush(phys_enc))
  454. cfg.split_flush_en = true;
  455. if (phys_enc->split_role == ENC_ROLE_MASTER) {
  456. DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
  457. if (hw_mdptop->ops.setup_split_pipe)
  458. hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
  459. }
  460. }
  461. static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
  462. struct drm_display_mode *adj_mode)
  463. {
  464. struct drm_display_mode *cur_mode;
  465. if (!connector || !adj_mode)
  466. return;
  467. list_for_each_entry(cur_mode, &connector->modes, head) {
  468. if (cur_mode->vdisplay == adj_mode->vdisplay &&
  469. cur_mode->hdisplay == adj_mode->hdisplay &&
  470. cur_mode->vrefresh == adj_mode->vrefresh) {
  471. adj_mode->private = cur_mode->private;
  472. adj_mode->private_flags |= cur_mode->private_flags;
  473. }
  474. }
  475. }
  476. static struct msm_display_topology dpu_encoder_get_topology(
  477. struct dpu_encoder_virt *dpu_enc,
  478. struct dpu_kms *dpu_kms,
  479. struct drm_display_mode *mode)
  480. {
  481. struct msm_display_topology topology;
  482. int i, intf_count = 0;
  483. for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
  484. if (dpu_enc->phys_encs[i])
  485. intf_count++;
  486. /* Use split topology for vdisplay > 1080 */
  487. topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
  488. topology.num_enc = 0;
  489. topology.num_intf = intf_count;
  490. return topology;
  491. }
  492. static int dpu_encoder_virt_atomic_check(
  493. struct drm_encoder *drm_enc,
  494. struct drm_crtc_state *crtc_state,
  495. struct drm_connector_state *conn_state)
  496. {
  497. struct dpu_encoder_virt *dpu_enc;
  498. struct msm_drm_private *priv;
  499. struct dpu_kms *dpu_kms;
  500. const struct drm_display_mode *mode;
  501. struct drm_display_mode *adj_mode;
  502. struct msm_display_topology topology;
  503. int i = 0;
  504. int ret = 0;
  505. if (!drm_enc || !crtc_state || !conn_state) {
  506. DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
  507. drm_enc != 0, crtc_state != 0, conn_state != 0);
  508. return -EINVAL;
  509. }
  510. dpu_enc = to_dpu_encoder_virt(drm_enc);
  511. DPU_DEBUG_ENC(dpu_enc, "\n");
  512. priv = drm_enc->dev->dev_private;
  513. dpu_kms = to_dpu_kms(priv->kms);
  514. mode = &crtc_state->mode;
  515. adj_mode = &crtc_state->adjusted_mode;
  516. trace_dpu_enc_atomic_check(DRMID(drm_enc));
  517. /*
  518. * display drivers may populate private fields of the drm display mode
  519. * structure while registering possible modes of a connector with DRM.
  520. * These private fields are not populated back while DRM invokes
  521. * the mode_set callbacks. This module retrieves and populates the
  522. * private fields of the given mode.
  523. */
  524. _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
  525. /* perform atomic check on each physical encoder */
  526. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  527. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  528. if (phys && phys->ops.atomic_check)
  529. ret = phys->ops.atomic_check(phys, crtc_state,
  530. conn_state);
  531. else if (phys && phys->ops.mode_fixup)
  532. if (!phys->ops.mode_fixup(phys, mode, adj_mode))
  533. ret = -EINVAL;
  534. if (ret) {
  535. DPU_ERROR_ENC(dpu_enc,
  536. "mode unsupported, phys idx %d\n", i);
  537. break;
  538. }
  539. }
  540. topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
  541. /* Reserve dynamic resources now. Indicating AtomicTest phase */
  542. if (!ret) {
  543. /*
  544. * Avoid reserving resources when mode set is pending. Topology
  545. * info may not be available to complete reservation.
  546. */
  547. if (drm_atomic_crtc_needs_modeset(crtc_state)
  548. && dpu_enc->mode_set_complete) {
  549. ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
  550. topology, true);
  551. dpu_enc->mode_set_complete = false;
  552. }
  553. }
  554. if (!ret)
  555. drm_mode_set_crtcinfo(adj_mode, 0);
  556. trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
  557. adj_mode->private_flags);
  558. return ret;
  559. }
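/*
 * Program the vsync/TE source for command-mode panels: a watchdog timer when
 * the panel TE is not usable, otherwise the dedicated vsync GPIO.
 */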
  560. static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
  561. struct msm_display_info *disp_info)
  562. {
  563. struct dpu_vsync_source_cfg vsync_cfg = { 0 };
  564. struct msm_drm_private *priv;
  565. struct dpu_kms *dpu_kms;
  566. struct dpu_hw_mdp *hw_mdptop;
  567. struct drm_encoder *drm_enc;
  568. int i;
  569. if (!dpu_enc || !disp_info) {
  570. DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
  571. dpu_enc != NULL, disp_info != NULL);
  572. return;
  573. } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
  574. DPU_ERROR("invalid num phys enc %d/%d\n",
  575. dpu_enc->num_phys_encs,
  576. (int) ARRAY_SIZE(dpu_enc->hw_pp));
  577. return;
  578. }
  579. drm_enc = &dpu_enc->base;
  580. /* these pointers are checked in virt_enable_helper */
  581. priv = drm_enc->dev->dev_private;
  582. dpu_kms = to_dpu_kms(priv->kms);
  583. if (!dpu_kms) {
  584. DPU_ERROR("invalid dpu_kms\n");
  585. return;
  586. }
  587. hw_mdptop = dpu_kms->hw_mdp;
  588. if (!hw_mdptop) {
  589. DPU_ERROR("invalid mdptop\n");
  590. return;
  591. }
  592. if (hw_mdptop->ops.setup_vsync_source &&
  593. disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
  594. for (i = 0; i < dpu_enc->num_phys_encs; i++)
  595. vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
  596. vsync_cfg.pp_count = dpu_enc->num_phys_encs;
  597. if (disp_info->is_te_using_watchdog_timer)
  598. vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
  599. else
  600. vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
  601. hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
  602. }
  603. }
  604. static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
  605. {
  606. struct dpu_encoder_virt *dpu_enc;
  607. int i;
  608. if (!drm_enc) {
  609. DPU_ERROR("invalid encoder\n");
  610. return;
  611. }
  612. dpu_enc = to_dpu_encoder_virt(drm_enc);
  613. DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
  614. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  615. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  616. if (phys && phys->ops.irq_control)
  617. phys->ops.irq_control(phys, enable);
  618. }
  619. }
  620. static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
  621. bool enable)
  622. {
  623. struct msm_drm_private *priv;
  624. struct dpu_kms *dpu_kms;
  625. struct dpu_encoder_virt *dpu_enc;
  626. dpu_enc = to_dpu_encoder_virt(drm_enc);
  627. priv = drm_enc->dev->dev_private;
  628. dpu_kms = to_dpu_kms(priv->kms);
  629. trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
  630. if (!dpu_enc->cur_master) {
  631. DPU_ERROR("encoder master not set\n");
  632. return;
  633. }
  634. if (enable) {
  635. /* enable DPU core clks */
  636. pm_runtime_get_sync(&dpu_kms->pdev->dev);
  637. /* enable all the irq */
  638. _dpu_encoder_irq_control(drm_enc, true);
  639. } else {
  640. /* disable all the irq */
  641. _dpu_encoder_irq_control(drm_enc, false);
  642. /* disable DPU core clks */
  643. pm_runtime_put_sync(&dpu_kms->pdev->dev);
  644. }
  645. }
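/*
 * Entry point for the resource-control state machine: callers pass one of the
 * DPU_ENC_RC_EVENT_* values and the encoder clocks and IRQs are enabled or
 * released according to the current rc_state.
 */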
  646. static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
  647. u32 sw_event)
  648. {
  649. struct dpu_encoder_virt *dpu_enc;
  650. struct msm_drm_private *priv;
  651. struct msm_drm_thread *disp_thread;
  652. bool is_vid_mode = false;
  653. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
  654. !drm_enc->crtc) {
  655. DPU_ERROR("invalid parameters\n");
  656. return -EINVAL;
  657. }
  658. dpu_enc = to_dpu_encoder_virt(drm_enc);
  659. priv = drm_enc->dev->dev_private;
  660. is_vid_mode = dpu_enc->disp_info.capabilities &
  661. MSM_DISPLAY_CAP_VID_MODE;
  662. if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
  663. DPU_ERROR("invalid crtc index\n");
  664. return -EINVAL;
  665. }
  666. disp_thread = &priv->disp_thread[drm_enc->crtc->index];
  667. /*
  668. * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
  669. * STOP events and return early for other events (i.e. wb display).
  670. */
  671. if (!dpu_enc->idle_pc_supported &&
  672. (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
  673. sw_event != DPU_ENC_RC_EVENT_STOP &&
  674. sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
  675. return 0;
  676. trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
  677. dpu_enc->rc_state, "begin");
  678. switch (sw_event) {
  679. case DPU_ENC_RC_EVENT_KICKOFF:
  680. /* cancel delayed off work, if any */
  681. if (kthread_cancel_delayed_work_sync(
  682. &dpu_enc->delayed_off_work))
  683. DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
  684. sw_event);
  685. mutex_lock(&dpu_enc->rc_lock);
  686. /* return if the resource control is already in ON state */
  687. if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
  688. DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
  689. DRMID(drm_enc), sw_event);
  690. mutex_unlock(&dpu_enc->rc_lock);
  691. return 0;
  692. } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
  693. dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
  694. DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
  695. DRMID(drm_enc), sw_event,
  696. dpu_enc->rc_state);
  697. mutex_unlock(&dpu_enc->rc_lock);
  698. return -EINVAL;
  699. }
  700. if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
  701. _dpu_encoder_irq_control(drm_enc, true);
  702. else
  703. _dpu_encoder_resource_control_helper(drm_enc, true);
  704. dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
  705. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  706. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  707. "kickoff");
  708. mutex_unlock(&dpu_enc->rc_lock);
  709. break;
  710. case DPU_ENC_RC_EVENT_FRAME_DONE:
  711. /*
  712. * mutex lock is not used as this event happens at interrupt
  713. * context. And locking is not required as the other events
  714. * like KICKOFF and STOP do a wait-for-idle before executing
  715. * the resource_control
  716. */
  717. if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
  718. DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
  719. DRMID(drm_enc), sw_event,
  720. dpu_enc->rc_state);
  721. return -EINVAL;
  722. }
  723. /*
  724. * schedule off work item only when there are no
  725. * frames pending
  726. */
  727. if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
  728. DRM_DEBUG_KMS("id:%d skip schedule work\n",
  729. DRMID(drm_enc));
  730. return 0;
  731. }
  732. kthread_queue_delayed_work(
  733. &disp_thread->worker,
  734. &dpu_enc->delayed_off_work,
  735. msecs_to_jiffies(dpu_enc->idle_timeout));
  736. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  737. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  738. "frame done");
  739. break;
  740. case DPU_ENC_RC_EVENT_PRE_STOP:
  741. /* cancel delayed off work, if any */
  742. if (kthread_cancel_delayed_work_sync(
  743. &dpu_enc->delayed_off_work))
  744. DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
  745. sw_event);
  746. mutex_lock(&dpu_enc->rc_lock);
  747. if (is_vid_mode &&
  748. dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
  749. _dpu_encoder_irq_control(drm_enc, true);
  750. }
  751. /* skip if already OFF or IDLE, resources are off already */
  752. else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
  753. dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
  754. DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
  755. DRMID(drm_enc), sw_event,
  756. dpu_enc->rc_state);
  757. mutex_unlock(&dpu_enc->rc_lock);
  758. return 0;
  759. }
  760. dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
  761. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  762. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  763. "pre stop");
  764. mutex_unlock(&dpu_enc->rc_lock);
  765. break;
  766. case DPU_ENC_RC_EVENT_STOP:
  767. mutex_lock(&dpu_enc->rc_lock);
  768. /* return if the resource control is already in OFF state */
  769. if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
  770. DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
  771. DRMID(drm_enc), sw_event);
  772. mutex_unlock(&dpu_enc->rc_lock);
  773. return 0;
  774. } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
  775. DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
  776. DRMID(drm_enc), sw_event, dpu_enc->rc_state);
  777. mutex_unlock(&dpu_enc->rc_lock);
  778. return -EINVAL;
  779. }
  780. /**
  781. * expect to arrive here only from either the IDLE or PRE_OFF state;
  782. * in the IDLE state the resources are already disabled
  783. */
  784. if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
  785. _dpu_encoder_resource_control_helper(drm_enc, false);
  786. dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
  787. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  788. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  789. "stop");
  790. mutex_unlock(&dpu_enc->rc_lock);
  791. break;
  792. case DPU_ENC_RC_EVENT_ENTER_IDLE:
  793. mutex_lock(&dpu_enc->rc_lock);
  794. if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
  795. DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
  796. DRMID(drm_enc), sw_event, dpu_enc->rc_state);
  797. mutex_unlock(&dpu_enc->rc_lock);
  798. return 0;
  799. }
  800. /*
  801. * if we are in ON but a frame was just kicked off,
  802. * ignore the IDLE event, it's probably a stale timer event
  803. */
  804. if (dpu_enc->frame_busy_mask[0]) {
  805. DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
  806. DRMID(drm_enc), sw_event, dpu_enc->rc_state);
  807. mutex_unlock(&dpu_enc->rc_lock);
  808. return 0;
  809. }
  810. if (is_vid_mode)
  811. _dpu_encoder_irq_control(drm_enc, false);
  812. else
  813. _dpu_encoder_resource_control_helper(drm_enc, false);
  814. dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
  815. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  816. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  817. "idle");
  818. mutex_unlock(&dpu_enc->rc_lock);
  819. break;
  820. default:
  821. DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
  822. sw_event);
  823. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  824. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  825. "error");
  826. break;
  827. }
  828. trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
  829. dpu_enc->idle_pc_supported, dpu_enc->rc_state,
  830. "end");
  831. return 0;
  832. }
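/*
 * Mode set: reserve pingpong and CTL blocks from the resource manager for
 * this encoder and hand them to each physical encoder before calling its
 * mode_set op.
 */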
  833. static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
  834. struct drm_display_mode *mode,
  835. struct drm_display_mode *adj_mode)
  836. {
  837. struct dpu_encoder_virt *dpu_enc;
  838. struct msm_drm_private *priv;
  839. struct dpu_kms *dpu_kms;
  840. struct list_head *connector_list;
  841. struct drm_connector *conn = NULL, *conn_iter;
  842. struct dpu_rm_hw_iter pp_iter, ctl_iter;
  843. struct msm_display_topology topology;
  844. struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
  845. int i = 0, ret;
  846. if (!drm_enc) {
  847. DPU_ERROR("invalid encoder\n");
  848. return;
  849. }
  850. dpu_enc = to_dpu_encoder_virt(drm_enc);
  851. DPU_DEBUG_ENC(dpu_enc, "\n");
  852. priv = drm_enc->dev->dev_private;
  853. dpu_kms = to_dpu_kms(priv->kms);
  854. connector_list = &dpu_kms->dev->mode_config.connector_list;
  855. trace_dpu_enc_mode_set(DRMID(drm_enc));
  856. list_for_each_entry(conn_iter, connector_list, head)
  857. if (conn_iter->encoder == drm_enc)
  858. conn = conn_iter;
  859. if (!conn) {
  860. DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
  861. return;
  862. } else if (!conn->state) {
  863. DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
  864. return;
  865. }
  866. topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
  867. /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
  868. ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
  869. topology, false);
  870. if (ret) {
  871. DPU_ERROR_ENC(dpu_enc,
  872. "failed to reserve hw resources, %d\n", ret);
  873. return;
  874. }
  875. dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
  876. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  877. dpu_enc->hw_pp[i] = NULL;
  878. if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
  879. break;
  880. dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
  881. }
  882. dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
  883. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  884. if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
  885. break;
  886. hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
  887. }
  888. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  889. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  890. if (phys) {
  891. if (!dpu_enc->hw_pp[i]) {
  892. DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
  893. "at idx: %d\n", i);
  894. return;
  895. }
  896. if (!hw_ctl[i]) {
  897. DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
  898. "at idx: %d\n", i);
  899. return;
  900. }
  901. phys->hw_pp = dpu_enc->hw_pp[i];
  902. phys->hw_ctl = hw_ctl[i];
  903. phys->connector = conn->state->connector;
  904. if (phys->ops.mode_set)
  905. phys->ops.mode_set(phys, mode, adj_mode);
  906. }
  907. }
  908. dpu_enc->mode_set_complete = true;
  909. }
  910. static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
  911. {
  912. struct dpu_encoder_virt *dpu_enc = NULL;
  913. struct msm_drm_private *priv;
  914. struct dpu_kms *dpu_kms;
  915. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
  916. DPU_ERROR("invalid parameters\n");
  917. return;
  918. }
  919. priv = drm_enc->dev->dev_private;
  920. dpu_kms = to_dpu_kms(priv->kms);
  921. if (!dpu_kms) {
  922. DPU_ERROR("invalid dpu_kms\n");
  923. return;
  924. }
  925. dpu_enc = to_dpu_encoder_virt(drm_enc);
  926. if (!dpu_enc || !dpu_enc->cur_master) {
  927. DPU_ERROR("invalid dpu encoder/master\n");
  928. return;
  929. }
  930. if (dpu_enc->cur_master->hw_mdptop &&
  931. dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
  932. dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
  933. dpu_enc->cur_master->hw_mdptop,
  934. dpu_kms->catalog);
  935. _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
  936. }
  937. void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
  938. {
  939. struct dpu_encoder_virt *dpu_enc = NULL;
  940. int i;
  941. if (!drm_enc) {
  942. DPU_ERROR("invalid encoder\n");
  943. return;
  944. }
  945. dpu_enc = to_dpu_encoder_virt(drm_enc);
  946. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  947. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  948. if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
  949. phys->ops.restore(phys);
  950. }
  951. if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
  952. dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
  953. _dpu_encoder_virt_enable_helper(drm_enc);
  954. }
  955. static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
  956. {
  957. struct dpu_encoder_virt *dpu_enc = NULL;
  958. int ret = 0;
  959. struct drm_display_mode *cur_mode = NULL;
  960. if (!drm_enc) {
  961. DPU_ERROR("invalid encoder\n");
  962. return;
  963. }
  964. dpu_enc = to_dpu_encoder_virt(drm_enc);
  965. cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
  966. trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
  967. cur_mode->vdisplay);
  968. /* always enable slave encoder before master */
  969. if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
  970. dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
  971. if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
  972. dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
  973. ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
  974. if (ret) {
  975. DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
  976. ret);
  977. return;
  978. }
  979. _dpu_encoder_virt_enable_helper(drm_enc);
  980. }
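/*
 * Disable path: wait for the pending transfer to complete, issue PRE_STOP,
 * disable each physical encoder, then STOP the resource-control state machine
 * and release the reserved hardware back to the resource manager.
 */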
  981. static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
  982. {
  983. struct dpu_encoder_virt *dpu_enc = NULL;
  984. struct msm_drm_private *priv;
  985. struct dpu_kms *dpu_kms;
  986. struct drm_display_mode *mode;
  987. int i = 0;
  988. if (!drm_enc) {
  989. DPU_ERROR("invalid encoder\n");
  990. return;
  991. } else if (!drm_enc->dev) {
  992. DPU_ERROR("invalid dev\n");
  993. return;
  994. } else if (!drm_enc->dev->dev_private) {
  995. DPU_ERROR("invalid dev_private\n");
  996. return;
  997. }
  998. mode = &drm_enc->crtc->state->adjusted_mode;
  999. dpu_enc = to_dpu_encoder_virt(drm_enc);
  1000. DPU_DEBUG_ENC(dpu_enc, "\n");
  1001. priv = drm_enc->dev->dev_private;
  1002. dpu_kms = to_dpu_kms(priv->kms);
  1003. trace_dpu_enc_disable(DRMID(drm_enc));
  1004. /* wait for idle */
  1005. dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
  1006. dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
  1007. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  1008. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  1009. if (phys && phys->ops.disable)
  1010. phys->ops.disable(phys);
  1011. }
  1012. /* after phys waits for frame-done, should be no more frames pending */
  1013. if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
  1014. DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
  1015. del_timer_sync(&dpu_enc->frame_done_timer);
  1016. }
  1017. dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
  1018. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  1019. if (dpu_enc->phys_encs[i])
  1020. dpu_enc->phys_encs[i]->connector = NULL;
  1021. }
  1022. DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
  1023. dpu_rm_release(&dpu_kms->rm, drm_enc);
  1024. }
  1025. static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
  1026. enum dpu_intf_type type, u32 controller_id)
  1027. {
  1028. int i = 0;
  1029. for (i = 0; i < catalog->intf_count; i++) {
  1030. if (catalog->intf[i].type == type
  1031. && catalog->intf[i].controller_id == controller_id) {
  1032. return catalog->intf[i].id;
  1033. }
  1034. }
  1035. return INTF_MAX;
  1036. }
  1037. static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
  1038. struct dpu_encoder_phys *phy_enc)
  1039. {
  1040. struct dpu_encoder_virt *dpu_enc = NULL;
  1041. unsigned long lock_flags;
  1042. if (!drm_enc || !phy_enc)
  1043. return;
  1044. DPU_ATRACE_BEGIN("encoder_vblank_callback");
  1045. dpu_enc = to_dpu_encoder_virt(drm_enc);
  1046. spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
  1047. if (dpu_enc->crtc_vblank_cb)
  1048. dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
  1049. spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
  1050. atomic_inc(&phy_enc->vsync_cnt);
  1051. DPU_ATRACE_END("encoder_vblank_callback");
  1052. }
  1053. static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
  1054. struct dpu_encoder_phys *phy_enc)
  1055. {
  1056. if (!phy_enc)
  1057. return;
  1058. DPU_ATRACE_BEGIN("encoder_underrun_callback");
  1059. atomic_inc(&phy_enc->underrun_cnt);
  1060. trace_dpu_enc_underrun_cb(DRMID(drm_enc),
  1061. atomic_read(&phy_enc->underrun_cnt));
  1062. DPU_ATRACE_END("encoder_underrun_callback");
  1063. }
  1064. void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
  1065. void (*vbl_cb)(void *), void *vbl_data)
  1066. {
  1067. struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
  1068. unsigned long lock_flags;
  1069. bool enable;
  1070. int i;
  1071. enable = vbl_cb ? true : false;
  1072. if (!drm_enc) {
  1073. DPU_ERROR("invalid encoder\n");
  1074. return;
  1075. }
  1076. trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
  1077. spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
  1078. dpu_enc->crtc_vblank_cb = vbl_cb;
  1079. dpu_enc->crtc_vblank_cb_data = vbl_data;
  1080. spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
  1081. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  1082. struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
  1083. if (phys && phys->ops.control_vblank_irq)
  1084. phys->ops.control_vblank_irq(phys, enable);
  1085. }
  1086. }
  1087. void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
  1088. void (*frame_event_cb)(void *, u32 event),
  1089. void *frame_event_cb_data)
  1090. {
  1091. struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
  1092. unsigned long lock_flags;
  1093. bool enable;
  1094. enable = frame_event_cb ? true : false;
  1095. if (!drm_enc) {
  1096. DPU_ERROR("invalid encoder\n");
  1097. return;
  1098. }
  1099. trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
  1100. spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
  1101. dpu_enc->crtc_frame_event_cb = frame_event_cb;
  1102. dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
  1103. spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
  1104. }
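/*
 * Frame-done handler: clear the busy bit of the reporting physical encoder
 * and, once every physical encoder is idle, cancel the frame-done watchdog
 * and forward the event to the CRTC frame-event callback.
 */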
  1105. static void dpu_encoder_frame_done_callback(
  1106. struct drm_encoder *drm_enc,
  1107. struct dpu_encoder_phys *ready_phys, u32 event)
  1108. {
  1109. struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
  1110. unsigned int i;
  1111. if (event & (DPU_ENCODER_FRAME_EVENT_DONE
  1112. | DPU_ENCODER_FRAME_EVENT_ERROR
  1113. | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
  1114. if (!dpu_enc->frame_busy_mask[0]) {
  1115. /**
  1116. * suppress frame_done without waiter,
  1117. * likely autorefresh
  1118. */
  1119. trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
  1120. event, ready_phys->intf_idx);
  1121. return;
  1122. }
  1123. /* One of the physical encoders has become idle */
  1124. for (i = 0; i < dpu_enc->num_phys_encs; i++) {
  1125. if (dpu_enc->phys_encs[i] == ready_phys) {
  1126. trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
  1127. dpu_enc->frame_busy_mask[0]);
  1128. clear_bit(i, dpu_enc->frame_busy_mask);
  1129. }
  1130. }
  1131. if (!dpu_enc->frame_busy_mask[0]) {
  1132. atomic_set(&dpu_enc->frame_done_timeout, 0);
  1133. del_timer(&dpu_enc->frame_done_timer);
  1134. dpu_encoder_resource_control(drm_enc,
  1135. DPU_ENC_RC_EVENT_FRAME_DONE);
  1136. if (dpu_enc->crtc_frame_event_cb)
  1137. dpu_enc->crtc_frame_event_cb(
  1138. dpu_enc->crtc_frame_event_cb_data,
  1139. event);
  1140. }
  1141. } else {
  1142. if (dpu_enc->crtc_frame_event_cb)
  1143. dpu_enc->crtc_frame_event_cb(
  1144. dpu_enc->crtc_frame_event_cb_data, event);
  1145. }
  1146. }
  1147. static void dpu_encoder_off_work(struct kthread_work *work)
  1148. {
  1149. struct dpu_encoder_virt *dpu_enc = container_of(work,
  1150. struct dpu_encoder_virt, delayed_off_work.work);
  1151. if (!dpu_enc) {
  1152. DPU_ERROR("invalid dpu encoder\n");
  1153. return;
  1154. }
  1155. dpu_encoder_resource_control(&dpu_enc->base,
  1156. DPU_ENC_RC_EVENT_ENTER_IDLE);
  1157. dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
  1158. DPU_ENCODER_FRAME_EVENT_IDLE);
  1159. }
  1160. /**
  1161. * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
  1162. * @drm_enc: Pointer to drm encoder structure
  1163. * @phys: Pointer to physical encoder structure
  1164. * @extra_flush_bits: Additional bit mask to include in flush trigger
  1165. */
  1166. static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
  1167. struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
  1168. {
  1169. struct dpu_hw_ctl *ctl;
  1170. int pending_kickoff_cnt;
  1171. u32 ret = UINT_MAX;
  1172. if (!drm_enc || !phys) {
  1173. DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
  1174. drm_enc != 0, phys != 0);
  1175. return;
  1176. }
  1177. if (!phys->hw_pp) {
  1178. DPU_ERROR("invalid pingpong hw\n");
  1179. return;
  1180. }
  1181. ctl = phys->hw_ctl;
  1182. if (!ctl || !ctl->ops.trigger_flush) {
  1183. DPU_ERROR("missing trigger cb\n");
  1184. return;
  1185. }
  1186. pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
  1187. if (extra_flush_bits && ctl->ops.update_pending_flush)
  1188. ctl->ops.update_pending_flush(ctl, extra_flush_bits);
  1189. ctl->ops.trigger_flush(ctl);
  1190. if (ctl->ops.get_pending_flush)
  1191. ret = ctl->ops.get_pending_flush(ctl);
  1192. trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
  1193. pending_kickoff_cnt, ctl->idx,
  1194. extra_flush_bits, ret);
  1195. }
  1196. /**
  1197. * _dpu_encoder_trigger_start - trigger start for a physical encoder
  1198. * @phys: Pointer to physical encoder structure
  1199. */
  1200. static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
  1201. {
  1202. if (!phys) {
  1203. DPU_ERROR("invalid argument(s)\n");
  1204. return;
  1205. }
  1206. if (!phys->hw_pp) {
  1207. DPU_ERROR("invalid pingpong hw\n");
  1208. return;
  1209. }
  1210. if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
  1211. phys->ops.trigger_start(phys);
  1212. }
  1213. void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
  1214. {
  1215. struct dpu_hw_ctl *ctl;
  1216. if (!phys_enc) {
  1217. DPU_ERROR("invalid encoder\n");
  1218. return;
  1219. }
  1220. ctl = phys_enc->hw_ctl;
  1221. if (ctl && ctl->ops.trigger_start) {
  1222. ctl->ops.trigger_start(ctl);
  1223. trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
  1224. }
  1225. }
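/*
 * Wait on info->wq until the pending counter reaches zero, retrying after
 * spurious wakeups as long as the deadline derived from timeout_ms has not
 * passed.
 */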
static int dpu_encoder_helper_wait_event_timeout(
		int32_t drm_id,
		int32_t hw_id,
		struct dpu_encoder_wait_info *info)
{
	int rc = 0;
	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
	s64 time;

	do {
		rc = wait_event_timeout(*(info->wq),
				atomic_read(info->atomic_cnt) == 0, jiffies);
		time = ktime_to_ms(ktime_get());

		trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
						 expected_time,
						 atomic_read(info->atomic_cnt));
	/* If we timed out, counter is valid and time is less, wait again */
	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
			(time < expected_time));

	return rc;
}

void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_hw_ctl *ctl;
	int rc;

	if (!phys_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
	ctl = phys_enc->hw_ctl;

	if (!ctl || !ctl->ops.reset)
		return;

	DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
		      ctl->idx);

	rc = ctl->ops.reset(ctl);
	if (rc) {
		DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
		dpu_dbg_dump(false, __func__, true, true);
	}

	phys_enc->enable_state = DPU_ENC_ENABLED;
}

/**
 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
 *	Iterate through the physical encoders and perform consolidated flush
 *	and/or control start triggering as needed. This is done in the virtual
 *	encoder rather than the individual physical ones in order to handle
 *	use cases that require visibility into multiple physical encoders at
 *	a time.
 * @dpu_enc: Pointer to virtual encoder structure
 */
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{
	struct dpu_hw_ctl *ctl;
	uint32_t i, pending_flush;
	unsigned long lock_flags;

	if (!dpu_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}

	pending_flush = 0x0;

	/* update pending counts and trigger kickoff ctl flush atomically */
	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);

	/* don't perform flush/start operations for slave encoders */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys || phys->enable_state == DPU_ENC_DISABLED)
			continue;

		ctl = phys->hw_ctl;
		if (!ctl)
			continue;

		if (phys->split_role != ENC_ROLE_SLAVE)
			set_bit(i, dpu_enc->frame_busy_mask);
		if (!phys->ops.needs_single_flush ||
				!phys->ops.needs_single_flush(phys))
			_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
		else if (ctl->ops.get_pending_flush)
			pending_flush |= ctl->ops.get_pending_flush(ctl);
	}

	/* for split flush, combine pending flush masks and send to master */
	if (pending_flush && dpu_enc->cur_master) {
		_dpu_encoder_trigger_flush(
				&dpu_enc->base,
				dpu_enc->cur_master,
				pending_flush);
	}

	_dpu_encoder_trigger_start(dpu_enc->cur_master);

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
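
/*
 * Clear the stored pending-flush mask on every CTL owned by this
 * encoder; for the command-mode primary CTL, also call its
 * trigger_pending op so the queued configuration is armed for the next
 * frame.
 */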
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	unsigned int i;
	struct dpu_hw_ctl *ctl;
	struct msm_display_info *disp_info;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	disp_info = &dpu_enc->disp_info;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];

		if (phys && phys->hw_ctl) {
			ctl = phys->hw_ctl;
			if (ctl->ops.clear_pending_flush)
				ctl->ops.clear_pending_flush(ctl);

			/* update only for command mode primary ctl */
			if ((phys == dpu_enc->cur_master) &&
			    (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
			    && ctl->ops.trigger_pending)
				ctl->ops.trigger_pending(ctl);
		}
	}
}
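
/*
 * Line-time math used below: mode->clock is in kHz, so 10^9 / clock
 * gives the pixel period in picoseconds, and (period * htotal) / 1000
 * gives the line time in nanoseconds. Illustrative example (numbers
 * are not taken from any specific panel): a 148500 kHz clock with
 * htotal = 2200 gives a ~6735 ps pixel period and a ~14.8 us line.
 */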
static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
		struct drm_display_mode *mode)
{
	u64 pclk_rate;
	u32 pclk_period;
	u32 line_time;

	/*
	 * For linetime calculation, only operate on master encoder.
	 */
	if (!dpu_enc->cur_master)
		return 0;

	if (!dpu_enc->cur_master->ops.get_line_count) {
		DPU_ERROR("get_line_count function not defined\n");
		return 0;
	}

	pclk_rate = mode->clock; /* pixel clock in kHz */
	if (pclk_rate == 0) {
		DPU_ERROR("pclk is 0, cannot calculate line time\n");
		return 0;
	}

	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
	if (pclk_period == 0) {
		DPU_ERROR("pclk period is 0\n");
		return 0;
	}

	/*
	 * Line time calculation based on Pixel clock and HTOTAL.
	 * Final unit is in ns.
	 */
	line_time = (pclk_period * mode->htotal) / 1000;
	if (line_time == 0) {
		DPU_ERROR("line time calculation is 0\n");
		return 0;
	}

	DPU_DEBUG_ENC(dpu_enc,
			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
			pclk_rate, pclk_period, line_time);

	return line_time;
}
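
/*
 * Estimate when the next vsync will arrive: wakeup = now +
 * (vtotal - current_line) * line_time, in nanoseconds. If the current
 * line count is already at or past vtotal, a full frame worth of line
 * times is used instead.
 */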
static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
		ktime_t *wakeup_time)
{
	struct drm_display_mode *mode;
	struct dpu_encoder_virt *dpu_enc;
	u32 cur_line;
	u32 line_time;
	u32 vtotal, time_to_vsync;
	ktime_t cur_time;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (!drm_enc->crtc || !drm_enc->crtc->state) {
		DPU_ERROR("crtc/crtc state object is NULL\n");
		return -EINVAL;
	}
	mode = &drm_enc->crtc->state->adjusted_mode;

	line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
	if (!line_time)
		return -EINVAL;

	cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);

	vtotal = mode->vtotal;
	if (cur_line >= vtotal)
		time_to_vsync = line_time * vtotal;
	else
		time_to_vsync = line_time * (vtotal - cur_line);

	if (time_to_vsync == 0) {
		DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
				vtotal);
		return -EINVAL;
	}

	cur_time = ktime_get();
	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);

	DPU_DEBUG_ENC(dpu_enc,
			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
			cur_line, vtotal, time_to_vsync,
			ktime_to_ms(cur_time),
			ktime_to_ms(*wakeup_time));

	return 0;
}

static void dpu_encoder_vsync_event_handler(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			vsync_event_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;

	if (!drm_enc->dev || !drm_enc->dev->dev_private ||
			!drm_enc->crtc) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	priv = drm_enc->dev->dev_private;

	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index\n");
		return;
	}

	event_thread = &priv->event_thread[drm_enc->crtc->index];
	if (!event_thread) {
		DPU_ERROR("event_thread not found for crtc:%d\n",
				drm_enc->crtc->index);
		return;
	}

	del_timer(&dpu_enc->vsync_event_timer);
}

static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
{
	struct dpu_encoder_virt *dpu_enc = container_of(work,
			struct dpu_encoder_virt, vsync_event_work);
	ktime_t wakeup_time;

	if (!dpu_enc) {
		DPU_ERROR("invalid dpu encoder\n");
		return;
	}

	if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
		return;

	trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
	mod_timer(&dpu_enc->vsync_event_timer,
			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
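
/*
 * Give every physical encoder a chance to prepare for the upcoming
 * kickoff (which may include waiting out the previous one), signal a
 * KICKOFF event to resource control, and if any phys reported
 * DPU_ENC_ERR_NEEDS_HW_RESET, reset all of them in order.
 */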
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
		struct dpu_encoder_kickoff_params *params)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	if (!drm_enc || !params) {
		DPU_ERROR("invalid args\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys) {
			if (phys->ops.prepare_for_kickoff)
				phys->ops.prepare_for_kickoff(phys, params);
			if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
				needs_hw_reset = true;
		}
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys && phys->ops.hw_reset)
				phys->ops.hw_reset(phys);
		}
	}
}
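
/*
 * Kickoff proper: arm the frame-done watchdog for
 * DPU_FRAME_DONE_TIMEOUT frames worth of time at the current refresh
 * rate (for illustration only: a timeout constant of 60 frames at a
 * 60 Hz mode arms roughly a one second timer; the constant itself is
 * defined elsewhere in this file), then flush/start the hardware and
 * let each phys handle its post-kickoff work.
 */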
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	ktime_t wakeup_time;
	unsigned int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

	atomic_set(&dpu_enc->frame_done_timeout,
			DPU_FRAME_DONE_TIMEOUT * 1000 /
			drm_enc->crtc->state->adjusted_mode.vrefresh);
	mod_timer(&dpu_enc->frame_done_timer, jiffies +
		((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys && phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
				ktime_to_ms(wakeup_time));
		mod_timer(&dpu_enc->vsync_event_timer,
				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
	}

	DPU_ATRACE_END("encoder_kickoff");
}

void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys && phys->ops.prepare_commit)
			phys->ops.prepare_commit(phys);
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct dpu_encoder_virt *dpu_enc;
	int i;

	if (!s || !s->private)
		return -EINVAL;

	dpu_enc = s->private;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys)
			continue;

		seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
				phys->intf_idx - INTF_0,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt));

		switch (phys->intf_mode) {
		case INTF_MODE_VIDEO:
			seq_puts(s, "mode: video\n");
			break;
		case INTF_MODE_CMD:
			seq_puts(s, "mode: command\n");
			break;
		default:
			seq_puts(s, "mode: ???\n");
			break;
		}
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

static int _dpu_encoder_debugfs_status_open(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
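
/*
 * Debugfs wiring: creates an "encoder<id>" directory under the DRM
 * minor's debugfs root (typically <debugfs>/dri/<minor>/) with a
 * "status" node backed by _dpu_encoder_status_show, and lets each
 * physical encoder register its own entries via its late_register op.
 */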
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	int i;

	static const struct file_operations debugfs_status_fops = {
		.open = _dpu_encoder_debugfs_status_open,
		.read = seq_read,
		.llseek = seq_lseek,
		.release = single_release,
	};

	char name[DPU_NAME_SIZE];

	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
		DPU_ERROR("invalid encoder or kms\n");
		return -EINVAL;
	}

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	priv = drm_enc->dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);
	if (!dpu_enc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0600,
			dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i] &&
				dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}

static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;

	if (!drm_enc)
		return;

	dpu_enc = to_dpu_encoder_virt(drm_enc);
	debugfs_remove_recursive(dpu_enc->debugfs_root);
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}

static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
{
}
#endif

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	_dpu_encoder_destroy_debugfs(encoder);
}
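
/*
 * Create the physical encoder backends for one interface: a video-mode
 * phys and/or a command-mode phys depending on the display capability
 * flags, recording the last one created as cur_master or cur_slave
 * based on the requested split role.
 */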
static int dpu_encoder_virt_add_phys_encs(
		u32 display_caps,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
				dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
		enc = dpu_encoder_phys_vid_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return enc == 0 ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
		enc = dpu_encoder_phys_cmd_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return enc == 0 ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}
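
/*
 * Callbacks the physical encoders invoke on their parent virtual
 * encoder: vblank, underrun and frame-done notifications.
 */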
static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
		struct dpu_kms *dpu_kms,
		struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	enum dpu_intf_type intf_type;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc || !dpu_kms) {
		DPU_ERROR("invalid arg(s), enc %d kms %d\n",
				dpu_enc != 0, dpu_kms != 0);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.parent_ops = &dpu_encoder_parent_ops;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	DPU_DEBUG("\n");

	switch (disp_info->intf_type) {
	case DRM_MODE_ENCODER_DSI:
		intf_type = INTF_DSI;
		break;
	default:
		DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
		return -EINVAL;
	}

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
				intf_type,
				controller_id);
		if (phys_params.intf_idx == INTF_MAX) {
			DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
					intf_type, controller_id);
			ret = -EINVAL;
		}

		if (!ret) {
			ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
					dpu_enc,
					&phys_params);
			if (ret)
				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
		}
	}

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys) {
			atomic_set(&phys->vsync_cnt, 0);
			atomic_set(&phys->underrun_cnt, 0);
		}
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}
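
/*
 * Frame-done watchdog armed in dpu_encoder_kickoff(): if it fires while
 * frame_busy_mask still has bits set and the timeout counter has not
 * already been cleared by the frame-done path, report a
 * DPU_ENCODER_FRAME_EVENT_ERROR to the registered frame-event callback.
 */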
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	struct msm_drm_private *priv;
	u32 event;

	if (!drm_enc->dev || !drm_enc->dev->dev_private) {
		DPU_ERROR("invalid parameters\n");
		return;
	}
	priv = drm_enc->dev->dev_private;

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.mode_set = dpu_encoder_virt_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_kms_encoder_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,

	/* This is called by dpu_kms_encoder_enable */
	.commit = dpu_encoder_virt_enable,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *drm_enc = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;

	dpu_enc = to_dpu_encoder_virt(enc);

	mutex_init(&dpu_enc->enc_lock);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret)
		goto fail;

	spin_lock_init(&dpu_enc->enc_spinlock);

	atomic_set(&dpu_enc->frame_done_timeout, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);

	mutex_init(&dpu_enc->rc_lock);
	kthread_init_delayed_work(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return ret;

fail:
	DPU_ERROR("failed to create encoder\n");
	if (drm_enc)
		dpu_encoder_destroy(drm_enc);

	return ret;
}
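
/*
 * Construction is two-staged: dpu_encoder_init() below allocates the
 * virtual encoder and registers it with DRM, after which
 * dpu_encoder_setup() above binds it to a specific display and
 * initializes its timers and workers.
 */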
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int rc = 0;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	return &dpu_enc->base;
}

int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
		enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (!phys)
			continue;

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys)
			return phys->intf_mode;
	}

	return INTF_MODE_NONE;
}