/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "vid.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
#include "dce/dce_11_0_enum.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET,
	DIG7_REGISTER_OFFSET,
	DIG8_REGISTER_OFFSET
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const u32 cz_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14300000,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14302000,
};

static const u32 polaris11_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};

static const u32 polaris10_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};

static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	case CHIP_POLARIS11:
		amdgpu_program_register_sequence(adev,
						 polaris11_golden_settings_a11,
						 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_program_register_sequence(adev,
						 polaris10_golden_settings_a11,
						 (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
		break;
	default:
		break;
	}
}
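
/*
 * The AZALIA audio endpoint registers are reached indirectly through an
 * index/data pair; the spinlock keeps the index write and the matching
 * data access atomic with respect to other callers.
 */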
static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
				       u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
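
/*
 * Vblank helpers: dce_v11_0_is_in_vblank() tests the vblank bit in the
 * CRTC status register, and dce_v11_0_is_counter_moving() samples the
 * scanout position twice to see whether the timing generator is still
 * advancing.
 */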
static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v11_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 100;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v11_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v11_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v11_0_is_in_vblank(adev, crtc)) {
		if (i++ == 100) {
			i = 0;
			if (!dce_v11_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v11_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: flip at hsync (async) rather than vsync
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v11_0_page_flip(struct amdgpu_device *adev,
				int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	u32 tmp;

	/* flip at hsync for async, default is vsync */
	/* use UPDATE_IMMEDIATE_EN instead for async? */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
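
/**
 * dce_v11_0_crtc_get_scanoutpos - get the current scanout position
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to query
 * @vbl: packed CRTC_V_BLANK_START_END register value
 * @position: packed CRTC_STATUS_POSITION register value
 *
 * Returns the raw vblank start/end and current scanout position
 * registers for the requested crtc, or -EINVAL for an invalid crtc.
 */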
static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					 u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v11_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
				enum amdgpu_hpd_id hpd)
{
	int idx;
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return connected;
	}

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
				       enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v11_0_hpd_sense(adev, hpd);
	int idx;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
}

/**
 * dce_v11_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; this avoids
			 * breaking the aux dp channel on imac and helps (but
			 * does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * It also avoids interrupt storms during dpms.
			 */
			continue;
		}

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);

		dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v11_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
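
/*
 * Samples the HV counter of every enabled CRTC, then re-reads it up to
 * ten times (100 us apart); any enabled CRTC whose counter never changes
 * is considered hung.
 */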
static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
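
/*
 * Called before the memory controller is reprogrammed: saves the VGA
 * render/HDP state, disables VGA rendering, and blanks every enabled
 * CRTC so no display client touches VRAM while the MC is updated.
 */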
static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
				     struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 1
			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				/* it is correct only for RGB; black is 0 */
				WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}
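
/*
 * Counterpart to dce_v11_0_stop_mc_access(): repoints the primary
 * surfaces and the VGA aperture at the (possibly relocated) start of
 * VRAM, unblanks the CRTCs that were enabled, and restores the saved
 * VGA state.
 */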
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
				       struct amdgpu_mode_mc_save *save)
{
	u32 tmp;
	int i;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}
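
/*
 * Enables or disables legacy VGA scanout: VGA_MEMORY_DISABLE gates
 * access through the VGA aperture and VGA_VSTATUS_CNTL gates the VGA
 * render engine itself.
 */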
static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
					   bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}
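
/*
 * Programs the FMT block for the encoder's CRTC: based on the sink's
 * bpc (6/8/10) it selects either spatial dithering (with frame/highpass
 * random sources) or plain truncation to the target depth; LVDS/eDP
 * (handled by atom) and analog encoders are skipped.
 */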
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/* display watermark setup */
/**
 * dce_v11_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;

	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v11_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
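
/*
 * The fixed-point math above works out, in MB/s, to:
 *
 *   bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7
 *
 * e.g. a hypothetical 1 GHz effective yclk (1000000 kHz) on a 2-channel
 * board gives 1000 * 8 * 0.7 = 5600 MB/s.
 */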

/**
 * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v11_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v11_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v11_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v11_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
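
/*
 * In short: latency = mc_latency + dc pipe latency + the time the other
 * heads need to return their chunk and cursor data, plus any shortfall
 * of the line buffer fill time versus the active display time.
 */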

/**
 * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	if (dce_v11_0_average_bandwidth(wm) <=
	    (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
	if (dce_v11_0_average_bandwidth(wm) <=
	    (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v11_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
  1073. /**
  1074. * dce_v11_0_program_watermarks - program display watermarks
  1075. *
  1076. * @adev: amdgpu_device pointer
  1077. * @amdgpu_crtc: the selected display controller
  1078. * @lb_size: line buffer size
  1079. * @num_heads: number of display controllers in use
  1080. *
  1081. * Calculate and program the display watermarks for the
  1082. * selected display controller (CIK).
  1083. */
  1084. static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
  1085. struct amdgpu_crtc *amdgpu_crtc,
  1086. u32 lb_size, u32 num_heads)
  1087. {
  1088. struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
  1089. struct dce10_wm_params wm_low, wm_high;
  1090. u32 pixel_period;
  1091. u32 line_time = 0;
  1092. u32 latency_watermark_a = 0, latency_watermark_b = 0;
  1093. u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
  1094. if (amdgpu_crtc->base.enabled && num_heads && mode) {
  1095. pixel_period = 1000000 / (u32)mode->clock;
  1096. line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v11_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v11_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}
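	/*
	 * The watermark hardware keeps two register sets, A and B.
	 * URGENCY_WATERMARK_MASK selects which set the subsequent
	 * DPG_PIPE_URGENCY_CONTROL writes land in (1 = set A, 2 = set B);
	 * the original mask value is restored afterwards so unrelated state
	 * is left untouched.
	 */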
	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/**
 * dce_v11_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					     lb_size, num_heads);
	}
}
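/*
 * Azalia pin widgets report their wiring in the configuration-default
 * register; a PORT_CONNECTIVITY value of 1 means "no physical connection"
 * (per the HD Audio configuration-default encoding), which is why the
 * helper below marks only that case as disconnected.
 */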
static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
			ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}
static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v11_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}
static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
}
static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
						 struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	int interlace = 0;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;
	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		/* build on tmp so the VIDEO_LIPSYNC field set above is kept */
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);
	/* set HDMI mode */
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 1);
	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};
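	/*
	 * Each descriptor register in the table above corresponds to one
	 * CEA-861 short-audio-descriptor coding type; descriptors 8 and 12
	 * (one-bit audio and DST, presumably) have no entry here and are
	 * simply never written.
	 */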
	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}
static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
				   struct amdgpu_audio_pin *pin,
				   bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
	AUD7_REGISTER_OFFSET,
};
static int dce_v11_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		adev->mode_info.audio.num_pins = 7;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.audio.num_pins = 8;
		break;
	case CHIP_POLARIS11:
		adev->mode_info.audio.num_pins = 6;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio.  it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}
static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}
/*
 * update the N and CTS parameters for a given pixel clock rate
 */
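/*
 * For HDMI audio clock regeneration the sink recovers the audio clock as
 * 128*fs = f_TMDS * N / CTS.  amdgpu_afmt_acr() supplies an N/CTS pair per
 * base sample rate (e.g. the HDMI-spec default N of 4096 for 32 kHz);
 * the helper below just writes those pairs into the per-rate registers.
 */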
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
/*
 * build a HDMI Video Info Frame
 */
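/*
 * The packed infoframe buffer starts with a 3-byte header (type, version,
 * length) followed by the checksum and payload, so "frame" below points at
 * the checksum byte.  AFMT_AVI_INFO3 carries the header version byte in
 * its top byte, which appears to be what the hardware expects there.
 */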
static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
						void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
		frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}
static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;
	u32 tmp;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
			    amdgpu_crtc->crtc_id);
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
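/*
 * Both DTO values are in kHz: for a 148500 kHz (148.5 MHz) pixel clock the
 * pair programmed above is PHASE = 24000 and MODULE = 148500, i.e. exactly
 * 24 MHz / 148.5 MHz (illustrative mode, not taken from the original
 * source).
 */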
/*
 * update the info frames with the data from the current display mode
 */
static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
				   struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
	dce_v11_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v11_0_audio_set_dto(encoder, mode->clock);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}
	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable audio info frames (frames won't be set until audio is enabled) */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* set the default audio delay */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be sufficient for all audio modes and small enough for all hblanks */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* allow 60958 channel status fields to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	if (bpc > 8)
		/* clear SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
	else
		/* select SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to send ACR packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	dce_v11_0_afmt_update_ACR(encoder, mode->clock);

	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
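	/*
	 * The channel-number fields written above follow the IEC 60958
	 * channel-status convention, where channel numbers are 1-based:
	 * left = 1, right = 2, and the remaining channels 2..7 map to
	 * values 3..8.
	 */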
	dce_v11_0_audio_write_speaker_allocation(encoder);

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v11_0_afmt_audio_select_pin(encoder);
	dce_v11_0_audio_write_sad_regs(encoder);
	dce_v11_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable AVI info frames */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* send audio packets */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

	/* enable audio after setting up hw */
	dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
}
static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (enable && dig->afmt->enabled)
		return;
	if (!enable && !dig->afmt->enabled)
		return;

	if (!enable && dig->afmt->pin) {
		dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
		dig->afmt->pin = NULL;
	}

	dig->afmt->enabled = enable;

	DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
		  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++)
		adev->mode_info.afmt[i] = NULL;

	/* DCE11 has audio blocks tied to DIG encoders */
	for (i = 0; i < adev->mode_info.num_dig; i++) {
		adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
		if (adev->mode_info.afmt[i]) {
			adev->mode_info.afmt[i]->offset = dig_offsets[i];
			adev->mode_info.afmt[i]->id = i;
		} else {
			int j;
			/* free the blocks allocated so far on failure */
			for (j = 0; j < i; j++) {
				kfree(adev->mode_info.afmt[j]);
				adev->mode_info.afmt[j] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}
static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->mode_info.num_dig; i++) {
		kfree(adev->mode_info.afmt[i]);
		adev->mode_info.afmt[i] = NULL;
	}
}
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 vga_control;

	vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
	if (enable)
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
	else
		WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}
static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	if (enable)
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
	else
		WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}
static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
	u32 pipe_config;
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		target_fb = fb;
	} else {
		amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
		target_fb = crtc->primary->fb;
	}

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = amdgpu_fb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	if (atomic) {
		fb_location = amdgpu_bo_gpu_offset(rbo);
	} else {
		r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(rbo);
			return -EINVAL;
		}
	}

	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
	amdgpu_bo_unreserve(rbo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
	switch (target_fb->pixel_format) {
	case DRM_FORMAT_C8:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* a fb with more than 8 bpc needs to bypass the hw lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* a fb with more than 8 bpc needs to bypass the hw lut to retain precision */
		bypass_lut = true;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->pixel_format));
		return -EINVAL;
	}
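	/*
	 * Reading the switch above: GRPH_DEPTH selects the pixel size class
	 * (0 = 8 bpp, 1 = 16 bpp, 2 = 32 bpp) and GRPH_FORMAT picks the
	 * component layout within that class, while the __BIG_ENDIAN blocks
	 * ask the hardware to byte-swap on scanout instead of doing it in
	 * software.
	 */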
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_2D_TILED_THIN1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
					  tile_split);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
					  mtaspect);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
					  ADDR_SURF_MICRO_TILING_DISPLAY);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_1D_TILED_THIN1);
	}
	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
				  pipe_config);

	dce_v11_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
	if (bypass_lut)
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v11_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	/* the viewport start must be 4-pixel / 2-line aligned */
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);
	/* set pageflip to happen only at start of vblank interval (front porch) */
	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);

	if (!atomic && fb && fb != crtc->primary->fb) {
		amdgpu_fb = to_amdgpu_framebuffer(fb);
		rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
		r = amdgpu_bo_reserve(rbo, false);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(rbo);
		amdgpu_bo_unreserve(rbo);
	}

	/* Bytes per pixel may have changed */
	dce_v11_0_bandwidth_update(adev);

	return 0;
}
static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
				     struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	u32 tmp;

	tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
	WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;
	u32 tmp;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
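	/*
	 * The 256 LUT entries are written as packed 10:10:10 triples
	 * (red at bits 29:20, green 19:10, blue 9:0); lut_r/g/b already hold
	 * 10-bit values, produced by the >> 6 in dce_v11_0_crtc_gamma_set()
	 * from the 16-bit userspace gamma ramps.
	 */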
	for (i = 0; i < 256; i++) {
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       (amdgpu_crtc->lut_r[i] << 20) |
		       (amdgpu_crtc->lut_g[i] << 10) |
		       (amdgpu_crtc->lut_b[i] << 0));
	}

	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		if (dig->linkb)
			return 1;
		else
			return 0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		if (dig->linkb)
			return 3;
		else
			return 2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (dig->linkb)
			return 5;
		else
			return 4;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		return 6;
	default:
		DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
		return 0;
	}
}
/**
 * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
 * monitors a dedicated PPLL must be used. If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself. If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 * Asic specific PLL information (as implemented below)
 *
 * DCE 11.x
 * Carrizo/Stoney
 * - PPLL1 and PPLL0 are tried, in that order
 * Polaris 10/11
 * - per-UNIPHY COMBOPHY PLLs are used; DP uses the DP DTO
 * Other DCE11 parts
 * - PPLL2, PPLL1 and PPLL0 are tried, in that order
 *
 */
static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11)) {
		struct amdgpu_encoder *amdgpu_encoder =
			to_amdgpu_encoder(amdgpu_crtc->encoder);
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

		if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
			return ATOM_DP_DTO;

		switch (amdgpu_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			if (dig->linkb)
				return ATOM_COMBOPHY_PLL1;
			else
				return ATOM_COMBOPHY_PLL0;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			if (dig->linkb)
				return ATOM_COMBOPHY_PLL3;
			else
				return ATOM_COMBOPHY_PLL2;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			if (dig->linkb)
				return ATOM_COMBOPHY_PLL5;
			else
				return ATOM_COMBOPHY_PLL4;
		default:
			DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
			return ATOM_PPLL_INVALID;
		}
	}

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else {
			/* use the same PPLL for all DP monitors */
			pll = amdgpu_pll_get_shared_dp_ppll(crtc);
			if (pll != ATOM_PPLL_INVALID)
				return pll;
		}
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* XXX need to determine what plls are available on each DCE11 part */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	} else {
		if (!(pll_in_use & (1 << ATOM_PPLL2)))
			return ATOM_PPLL2;
		if (!(pll_in_use & (1 << ATOM_PPLL1)))
			return ATOM_PPLL1;
		if (!(pll_in_use & (1 << ATOM_PPLL0)))
			return ATOM_PPLL0;
		DRM_ERROR("unable to allocate a PPLL\n");
		return ATOM_PPLL_INVALID;
	}
	return ATOM_PPLL_INVALID;
}
static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
	else
		cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}
static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	u32 tmp;

	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	u32 tmp;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
	tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
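/*
 * The hardware cursor cannot be given negative coordinates.  When the
 * cursor hangs off the top or left edge, the helper below clamps the
 * position to 0 and instead moves the hotspot (CUR_HOT_SPOT) into the
 * cursor image, so the visible result is the same.
 */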
static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
					int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;

	/* avivo cursor are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	return 0;
}
static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
				      int x, int y)
{
	int ret;

	dce_v11_0_lock_cursor(crtc, true);
	ret = dce_v11_0_cursor_move_locked(crtc, x, y);
	dce_v11_0_lock_cursor(crtc, false);

	return ret;
}
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x,
				      int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v11_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}

	amdgpu_crtc->cursor_width = width;
	amdgpu_crtc->cursor_height = height;

	dce_v11_0_lock_cursor(crtc, true);
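	/*
	 * If only the hotspot moved, shift the cursor position by the
	 * difference between the old and new hotspot so that the point of
	 * the cursor stays put on screen (position + hotspot is the fixed
	 * on-screen location).
	 */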
	if (hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v11_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v11_0_show_cursor(crtc);
	dce_v11_0_lock_cursor(crtc, false);

unpin:
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, false);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (amdgpu_crtc->cursor_bo) {
		dce_v11_0_lock_cursor(crtc, true);

		dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
					     amdgpu_crtc->cursor_y);

		dce_v11_0_show_cursor(crtc);

		dce_v11_0_lock_cursor(crtc, false);
	}
}
static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				     u16 *blue, uint32_t start, uint32_t size)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int end = (start + size > 256) ? 256 : start + size, i;

	/* userspace palettes are always correct as is */
	for (i = start; i < end; i++) {
		amdgpu_crtc->lut_r[i] = red[i] >> 6;
		amdgpu_crtc->lut_g[i] = green[i] >> 6;
		amdgpu_crtc->lut_b[i] = blue[i] >> 6;
	}
	dce_v11_0_crtc_load_lut(crtc);
}
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
        .cursor_set2 = dce_v11_0_crtc_cursor_set2,
        .cursor_move = dce_v11_0_crtc_cursor_move,
        .gamma_set = dce_v11_0_crtc_gamma_set,
        .set_config = amdgpu_crtc_set_config,
        .destroy = dce_v11_0_crtc_destroy,
        .page_flip = amdgpu_crtc_page_flip,
};
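/*
 * DPMS handling: on enable, unblank the CRTC, make sure the VBLANK and
 * PFLIP interrupt sources stay armed, and reload the LUT; on disable,
 * blank and power the CRTC back down. PM clocks are recomputed either
 * way so dpm tracks the active display configuration.
 */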
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        unsigned type;

        switch (mode) {
        case DRM_MODE_DPMS_ON:
                amdgpu_crtc->enabled = true;
                amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_vblank_on(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                drm_vblank_off(dev, amdgpu_crtc->crtc_id);
                if (amdgpu_crtc->enabled) {
                        dce_v11_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
                        dce_v11_0_vga_enable(crtc, false);
                }
                amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
                amdgpu_crtc->enabled = false;
                break;
        }
        /* adjust pm to dpms */
        amdgpu_pm_compute_clocks(adev);
}
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
{
        /* disable crtc pair power gating before programming */
        amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
        amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
        dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
{
        dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
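/*
 * Full CRTC teardown: unpin the scanout buffer, disable the graphics
 * plane and power gate the CRTC. The PLL is only switched off if no
 * other enabled CRTC shares it (see the pll_id scan below).
 */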
static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_atom_ss ss;
        int i;

        dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
        if (crtc->primary->fb) {
                int r;
                struct amdgpu_framebuffer *amdgpu_fb;
                struct amdgpu_bo *rbo;

                amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
                rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
                r = amdgpu_bo_reserve(rbo, false);
                if (unlikely(r))
                        DRM_ERROR("failed to reserve rbo before unpin\n");
                else {
                        amdgpu_bo_unpin(rbo);
                        amdgpu_bo_unreserve(rbo);
                }
        }
        /* disable the GRPH */
        dce_v11_0_grph_enable(crtc, false);

        amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (adev->mode_info.crtcs[i] &&
                    adev->mode_info.crtcs[i]->enabled &&
                    i != amdgpu_crtc->crtc_id &&
                    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
                        /* another enabled crtc is using this pll,
                         * so leave the pll on
                         */
                        goto done;
                }
        }

        switch (amdgpu_crtc->pll_id) {
        case ATOM_PPLL0:
        case ATOM_PPLL1:
        case ATOM_PPLL2:
                /* disable the ppll */
                amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
                break;
        case ATOM_COMBOPHY_PLL0:
        case ATOM_COMBOPHY_PLL1:
        case ATOM_COMBOPHY_PLL2:
        case ATOM_COMBOPHY_PLL3:
        case ATOM_COMBOPHY_PLL4:
        case ATOM_COMBOPHY_PLL5:
                /* disable the ppll */
                amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
                                                 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
                break;
        default:
                break;
        }
done:
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->adjusted_clock = 0;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
}
static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode,
                                   int x, int y, struct drm_framebuffer *old_fb)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;

        if (!amdgpu_crtc->adjusted_clock)
                return -EINVAL;

        if ((adev->asic_type == CHIP_POLARIS10) ||
            (adev->asic_type == CHIP_POLARIS11)) {
                struct amdgpu_encoder *amdgpu_encoder =
                        to_amdgpu_encoder(amdgpu_crtc->encoder);
                int encoder_mode =
                        amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

                /* SetPixelClock calculates the plls and ss values now */
                amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
                                                 amdgpu_crtc->pll_id,
                                                 encoder_mode, amdgpu_encoder->encoder_id,
                                                 adjusted_mode->clock, 0, 0, 0, 0,
                                                 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
        } else {
                amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
        }
        amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
        dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
        amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
        amdgpu_atombios_crtc_scaler_setup(crtc);
        dce_v11_0_cursor_reset(crtc);
        /* update the hw version for dpm */
        amdgpu_crtc->hw_mode = *adjusted_mode;

        return 0;
}
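/*
 * mode_fixup doubles as early resource assignment here: it caches the
 * encoder/connector pair on the amdgpu_crtc and picks a PLL. Failing
 * to get a PPLL is only fatal for non-DP encoders, presumably because
 * DP links can be clocked without a dedicated PPLL on this generation.
 */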
static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;

        /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc == crtc) {
                        amdgpu_crtc->encoder = encoder;
                        amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
                        break;
                }
        }
        if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
                amdgpu_crtc->encoder = NULL;
                amdgpu_crtc->connector = NULL;
                return false;
        }
        if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
                return false;
        if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
                return false;
        /* pick pll */
        amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
        /* if we can't get a PPLL for a non-DP encoder, fail */
        if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
            !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
                return false;

        return true;
}
static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
                                   struct drm_framebuffer *old_fb)
{
        return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
                                          struct drm_framebuffer *fb,
                                          int x, int y, enum mode_set_atomic state)
{
        return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
        .dpms = dce_v11_0_crtc_dpms,
        .mode_fixup = dce_v11_0_crtc_mode_fixup,
        .mode_set = dce_v11_0_crtc_mode_set,
        .mode_set_base = dce_v11_0_crtc_set_base,
        .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
        .prepare = dce_v11_0_crtc_prepare,
        .commit = dce_v11_0_crtc_commit,
        .load_lut = dce_v11_0_crtc_load_lut,
        .disable = dce_v11_0_crtc_disable,
};
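/*
 * Allocate and register one amdgpu_crtc. The trailing connector array
 * (AMDGPUFB_CONN_LIMIT entries) is allocated in the same block. The
 * 128x128 cursor limit, the linear default LUT and the per-CRTC
 * register offset are set up here.
 */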
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
{
        struct amdgpu_crtc *amdgpu_crtc;
        int i;

        amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
                              (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
        if (amdgpu_crtc == NULL)
                return -ENOMEM;

        drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);

        drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
        amdgpu_crtc->crtc_id = index;
        adev->mode_info.crtcs[index] = amdgpu_crtc;

        amdgpu_crtc->max_cursor_width = 128;
        amdgpu_crtc->max_cursor_height = 128;
        adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
        adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

        for (i = 0; i < 256; i++) {
                amdgpu_crtc->lut_r[i] = i << 2;
                amdgpu_crtc->lut_g[i] = i << 2;
                amdgpu_crtc->lut_b[i] = i << 2;
        }

        switch (amdgpu_crtc->crtc_id) {
        case 0:
        default:
                amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
                break;
        case 1:
                amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
                break;
        case 2:
                amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
                break;
        case 3:
                amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
                break;
        case 4:
                amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
                break;
        case 5:
                amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
                break;
        }

        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
        amdgpu_crtc->adjusted_clock = 0;
        amdgpu_crtc->encoder = NULL;
        amdgpu_crtc->connector = NULL;
        drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);

        return 0;
}
static int dce_v11_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
        adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;

        dce_v11_0_set_display_funcs(adev);
        dce_v11_0_set_irq_funcs(adev);

        switch (adev->asic_type) {
        case CHIP_CARRIZO:
                adev->mode_info.num_crtc = 3;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_STONEY:
                adev->mode_info.num_crtc = 2;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 9;
                break;
        case CHIP_POLARIS10:
                adev->mode_info.num_crtc = 6;
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_POLARIS11:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}
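/*
 * Interrupt source ids registered below: src_ids 1..num_crtc are the
 * per-CRTC vblank/vline sources, the even ids 8, 10, ..., 18 are the
 * per-CRTC pageflip sources (two ids per CRTC, see the (src_id - 8) >> 1
 * decode in the pageflip handler), and id 42 is hotplug detect.
 */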
static int dce_v11_0_sw_init(void *handle)
{
        int r, i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
                if (r)
                        return r;
        }

        for (i = 8; i < 20; i += 2) {
                r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
                if (r)
                        return r;
        }

        /* HPD hotplug */
        r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
        if (r)
                return r;

        adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
        adev->ddev->mode_config.async_page_flip = true;
        adev->ddev->mode_config.max_width = 16384;
        adev->ddev->mode_config.max_height = 16384;
        adev->ddev->mode_config.preferred_depth = 24;
        adev->ddev->mode_config.prefer_shadow = 1;
        adev->ddev->mode_config.fb_base = adev->mc.aper_base;

        r = amdgpu_modeset_create_props(adev);
        if (r)
                return r;
        /* allocate crtcs */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = dce_v11_0_crtc_init(adev, i);
                if (r)
                        return r;
        }

        if (amdgpu_atombios_get_connector_info_from_object_table(adev))
                amdgpu_print_display_setup(adev->ddev);
        else
                return -EINVAL;

        /* setup afmt */
        r = dce_v11_0_afmt_init(adev);
        if (r)
                return r;

        r = dce_v11_0_audio_init(adev);
        if (r)
                return r;

        drm_kms_helper_poll_init(adev->ddev);

        adev->mode_info.mode_config_initialized = true;
        return 0;
}
static int dce_v11_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        kfree(adev->mode_info.bios_hardcoded_edid);

        drm_kms_helper_poll_fini(adev->ddev);

        dce_v11_0_audio_fini(adev);
        dce_v11_0_afmt_fini(adev);

        adev->mode_info.mode_config_initialized = false;

        return 0;
}
static int dce_v11_0_hw_init(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dce_v11_0_init_golden_registers(adev);

        /* init dig PHYs, disp eng pll */
        amdgpu_atombios_crtc_powergate_init(adev);
        amdgpu_atombios_encoder_init_dig(adev);
        if ((adev->asic_type == CHIP_POLARIS10) ||
            (adev->asic_type == CHIP_POLARIS11)) {
                amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
                                                   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
                amdgpu_atombios_crtc_set_dce_clock(adev, 0,
                                                   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
        } else {
                amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
        }

        /* initialize hpd */
        dce_v11_0_hpd_init(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        dce_v11_0_pageflip_interrupt_init(adev);

        return 0;
}
static int dce_v11_0_hw_fini(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dce_v11_0_hpd_fini(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        dce_v11_0_pageflip_interrupt_fini(adev);

        return 0;
}
static int dce_v11_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_atombios_scratch_regs_save(adev);

        return dce_v11_0_hw_fini(handle);
}

static int dce_v11_0_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        ret = dce_v11_0_hw_init(handle);

        amdgpu_atombios_scratch_regs_restore(adev);

        /* turn on the BL */
        if (adev->mode_info.bl_encoder) {
                u8 bl_level = amdgpu_display_backlight_get_level(adev,
                                                                 adev->mode_info.bl_encoder);
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                   bl_level);
        }

        return ret;
}
static bool dce_v11_0_is_idle(void *handle)
{
        return true;
}

static int dce_v11_0_wait_for_idle(void *handle)
{
        return 0;
}
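/*
 * SRBM soft reset sequence: assert the DC reset bit, let it settle,
 * then de-assert it. The read-backs of mmSRBM_SOFT_RESET after each
 * write make sure the writes have landed before the delays.
 */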
static int dce_v11_0_soft_reset(void *handle)
{
        u32 srbm_soft_reset = 0, tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (dce_v11_0_is_display_hung(adev))
                srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }
        return 0;
}
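/*
 * Per-CRTC vblank/vline interrupt masking is a read-modify-write of
 * LB_INTERRUPT_MASK at the CRTC's register offset, e.g. to enable
 * vblank interrupts:
 *
 *   tmp = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
 *   tmp = REG_SET_FIELD(tmp, LB_INTERRUPT_MASK, VBLANK_INTERRUPT_MASK, 1);
 *   WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], tmp);
 */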
static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
                                                      int crtc,
                                                      enum amdgpu_interrupt_state state)
{
        u32 lb_interrupt_mask;

        if (crtc >= adev->mode_info.num_crtc) {
                DRM_DEBUG("invalid crtc %d\n", crtc);
                return;
        }

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
                lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
                                                  VBLANK_INTERRUPT_MASK, 0);
                WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
                lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
                                                  VBLANK_INTERRUPT_MASK, 1);
                WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
                break;
        default:
                break;
        }
}
static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
                                                     int crtc,
                                                     enum amdgpu_interrupt_state state)
{
        u32 lb_interrupt_mask;

        if (crtc >= adev->mode_info.num_crtc) {
                DRM_DEBUG("invalid crtc %d\n", crtc);
                return;
        }

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
                lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
                                                  VLINE_INTERRUPT_MASK, 0);
                WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
                lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
                                                  VLINE_INTERRUPT_MASK, 1);
                WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
                break;
        default:
                break;
        }
}
static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned hpd,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp;

        if (hpd >= adev->mode_info.num_hpd) {
                DRM_DEBUG("invalid hpd %d\n", hpd);
                return 0;
        }

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
                WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
                WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
                break;
        default:
                break;
        }
        return 0;
}
static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        switch (type) {
        case AMDGPU_CRTC_IRQ_VBLANK1:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
                break;
        case AMDGPU_CRTC_IRQ_VBLANK2:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
                break;
        case AMDGPU_CRTC_IRQ_VBLANK3:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
                break;
        case AMDGPU_CRTC_IRQ_VBLANK4:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
                break;
        case AMDGPU_CRTC_IRQ_VBLANK5:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
                break;
        case AMDGPU_CRTC_IRQ_VBLANK6:
                dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE1:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE2:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE3:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE4:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE5:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
                break;
        case AMDGPU_CRTC_IRQ_VLINE6:
                dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
                break;
        default:
                break;
        }
        return 0;
}
static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
{
        u32 reg;

        if (type >= adev->mode_info.num_crtc) {
                DRM_ERROR("invalid pageflip crtc %d\n", type);
                return -EINVAL;
        }

        reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
        if (state == AMDGPU_IRQ_STATE_DISABLE)
                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
                       reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
        else
                WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
                       reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);

        return 0;
}
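/*
 * Pageflip interrupt handler: recover the CRTC index from the source id
 * (crtc_id = (src_id - 8) / 2), ack the GRPH pageflip status bit, then
 * complete the flip work under the event lock and hand the vblank event
 * back to userspace.
 */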
static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
{
        unsigned long flags;
        unsigned crtc_id;
        struct amdgpu_crtc *amdgpu_crtc;
        struct amdgpu_flip_work *works;

        crtc_id = (entry->src_id - 8) >> 1;
        if (crtc_id >= adev->mode_info.num_crtc) {
                DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
                return -EINVAL;
        }
        amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

        if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
            GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
                WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
                       GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);

        /* an IRQ can arrive during the initial setup stage */
        if (amdgpu_crtc == NULL)
                return 0;

        spin_lock_irqsave(&adev->ddev->event_lock, flags);
        works = amdgpu_crtc->pflip_works;
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
                                 "AMDGPU_FLIP_SUBMITTED(%d)\n",
                                 amdgpu_crtc->pflip_status,
                                 AMDGPU_FLIP_SUBMITTED);
                spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
                return 0;
        }

        /* page flip completed, clean up */
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        amdgpu_crtc->pflip_works = NULL;

        /* wake up userspace */
        if (works->event)
                drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

        drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
        schedule_work(&works->unpin_work);

        return 0;
}
static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
                                  int hpd)
{
        u32 tmp;

        if (hpd >= adev->mode_info.num_hpd) {
                DRM_DEBUG("invalid hpd %d\n", hpd);
                return;
        }

        tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
        tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
        WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}

static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
                                          int crtc)
{
        u32 tmp;

        if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
                DRM_DEBUG("invalid crtc %d\n", crtc);
                return;
        }

        tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
        tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
        WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
}

static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
                                         int crtc)
{
        u32 tmp;

        if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
                DRM_DEBUG("invalid crtc %d\n", crtc);
                return;
        }

        tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
        tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
        WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
}
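/*
 * CRTC interrupt handler: src_id encodes the CRTC (D1 = src_id 1, ...)
 * and src_data selects vblank (0) vs. vline (1). Spurious events, i.e.
 * an IH entry with no corresponding status bit set, are only logged.
 */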
static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
                              struct amdgpu_irq_src *source,
                              struct amdgpu_iv_entry *entry)
{
        unsigned crtc = entry->src_id - 1;
        uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
        unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

        switch (entry->src_data) {
        case 0: /* vblank */
                if (disp_int & interrupt_status_offsets[crtc].vblank)
                        dce_v11_0_crtc_vblank_int_ack(adev, crtc);
                else
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

                if (amdgpu_irq_enabled(adev, source, irq_type)) {
                        drm_handle_vblank(adev->ddev, crtc);
                }
                DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
                break;
        case 1: /* vline */
                if (disp_int & interrupt_status_offsets[crtc].vline)
                        dce_v11_0_crtc_vline_int_ack(adev, crtc);
                else
                        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

                DRM_DEBUG("IH: D%d vline\n", crtc + 1);
                break;
        default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
                break;
        }

        return 0;
}
static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
                             struct amdgpu_irq_src *source,
                             struct amdgpu_iv_entry *entry)
{
        uint32_t disp_int, mask;
        unsigned hpd;

        if (entry->src_data >= adev->mode_info.num_hpd) {
                DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
                return 0;
        }

        hpd = entry->src_data;
        disp_int = RREG32(interrupt_status_offsets[hpd].reg);
        mask = interrupt_status_offsets[hpd].hpd;

        if (disp_int & mask) {
                dce_v11_0_hpd_int_ack(adev, hpd);
                schedule_work(&adev->hotplug_work);
                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }

        return 0;
}
static int dce_v11_0_set_clockgating_state(void *handle,
                                           enum amd_clockgating_state state)
{
        return 0;
}

static int dce_v11_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs dce_v11_0_ip_funcs = {
        .name = "dce_v11_0",
        .early_init = dce_v11_0_early_init,
        .late_init = NULL,
        .sw_init = dce_v11_0_sw_init,
        .sw_fini = dce_v11_0_sw_fini,
        .hw_init = dce_v11_0_hw_init,
        .hw_fini = dce_v11_0_hw_fini,
        .suspend = dce_v11_0_suspend,
        .resume = dce_v11_0_resume,
        .is_idle = dce_v11_0_is_idle,
        .wait_for_idle = dce_v11_0_wait_for_idle,
        .soft_reset = dce_v11_0_soft_reset,
        .set_clockgating_state = dce_v11_0_set_clockgating_state,
        .set_powergating_state = dce_v11_0_set_powergating_state,
};
static void
dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

        amdgpu_encoder->pixel_clock = adjusted_mode->clock;

        /* need to call this here rather than in prepare() since we need some crtc info */
        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

        /* set scaler clears this on some chips */
        dce_v11_0_set_interleave(encoder->crtc, mode);

        if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
                dce_v11_0_afmt_enable(encoder, true);
                dce_v11_0_afmt_setmode(encoder, adjusted_mode);
        }
}
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

        if ((amdgpu_encoder->active_device &
             (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
            (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
             ENCODER_OBJECT_ID_NONE)) {
                struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

                if (dig) {
                        dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
                        if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
                                dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
                }
        }

        amdgpu_atombios_scratch_regs_lock(adev, true);

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                /* select the clock/data port if it uses a router */
                if (amdgpu_connector->router.cd_valid)
                        amdgpu_i2c_router_select_cd_port(amdgpu_connector);

                /* turn eDP panel on for mode set */
                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                        amdgpu_atombios_encoder_set_edp_panel_power(connector,
                                                                    ATOM_TRANSMITTER_ACTION_POWER_ON);
        }

        /* this is needed for the pll/ss setup to work correctly in some cases */
        amdgpu_atombios_encoder_set_crtc_source(encoder);
        /* set up the FMT blocks */
        dce_v11_0_program_fmt(encoder);
}
static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;

        /* need to call this here as we need the crtc set up */
        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
        amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig;

        amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

        if (amdgpu_atombios_encoder_is_digital(encoder)) {
                if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
                        dce_v11_0_afmt_enable(encoder, false);
                dig = amdgpu_encoder->enc_priv;
                dig->dig_encoder = -1;
        }
        amdgpu_encoder->active_device = 0;
}
/* these are handled by the primary encoders */
static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
                       struct drm_display_mode *mode,
                       struct drm_display_mode *adjusted_mode)
{

}

static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
        .dpms = dce_v11_0_ext_dpms,
        .prepare = dce_v11_0_ext_prepare,
        .mode_set = dce_v11_0_ext_mode_set,
        .commit = dce_v11_0_ext_commit,
        .disable = dce_v11_0_ext_disable,
        /* no detect for TMDS/LVDS yet */
};
static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
        .dpms = amdgpu_atombios_encoder_dpms,
        .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
        .prepare = dce_v11_0_encoder_prepare,
        .mode_set = dce_v11_0_encoder_mode_set,
        .commit = dce_v11_0_encoder_commit,
        .disable = dce_v11_0_encoder_disable,
        .detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
        .dpms = amdgpu_atombios_encoder_dpms,
        .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
        .prepare = dce_v11_0_encoder_prepare,
        .mode_set = dce_v11_0_encoder_mode_set,
        .commit = dce_v11_0_encoder_commit,
        .detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

        if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
                amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
        kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
        kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
        .destroy = dce_v11_0_encoder_destroy,
};
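/*
 * Register an encoder from the ATOM object table. possible_crtcs is a
 * bitmask of CRTC indices an encoder may drive, so e.g. 0x3f allows all
 * six CRTCs on the larger parts. A duplicate enumeration only adds to
 * the supported-device mask of the already-registered encoder.
 */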
static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
                                  uint32_t encoder_enum,
                                  uint32_t supported_device,
                                  u16 caps)
{
        struct drm_device *dev = adev->ddev;
        struct drm_encoder *encoder;
        struct amdgpu_encoder *amdgpu_encoder;

        /* see if we already added it */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                amdgpu_encoder = to_amdgpu_encoder(encoder);
                if (amdgpu_encoder->encoder_enum == encoder_enum) {
                        amdgpu_encoder->devices |= supported_device;
                        return;
                }
        }

        /* add a new one */
        amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
        if (!amdgpu_encoder)
                return;

        encoder = &amdgpu_encoder->base;
        switch (adev->mode_info.num_crtc) {
        case 1:
                encoder->possible_crtcs = 0x1;
                break;
        case 2:
        default:
                encoder->possible_crtcs = 0x3;
                break;
        case 4:
                encoder->possible_crtcs = 0xf;
                break;
        case 6:
                encoder->possible_crtcs = 0x3f;
                break;
        }

        amdgpu_encoder->enc_priv = NULL;
        amdgpu_encoder->encoder_enum = encoder_enum;
        amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
        amdgpu_encoder->devices = supported_device;
        amdgpu_encoder->rmx_type = RMX_OFF;
        amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
        amdgpu_encoder->is_ext_encoder = false;
        amdgpu_encoder->caps = caps;

        switch (amdgpu_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
                drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                 DRM_MODE_ENCODER_DAC, NULL);
                drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
                break;
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                        amdgpu_encoder->rmx_type = RMX_FULL;
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_LVDS, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
                } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_DAC, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
                } else {
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_TMDS, NULL);
                        amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
                }
                drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
                break;
        case ENCODER_OBJECT_ID_SI170B:
        case ENCODER_OBJECT_ID_CH7303:
        case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
        case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
        case ENCODER_OBJECT_ID_TITFP513:
        case ENCODER_OBJECT_ID_VT1623:
        case ENCODER_OBJECT_ID_HDMI_SI1930:
        case ENCODER_OBJECT_ID_TRAVIS:
        case ENCODER_OBJECT_ID_NUTMEG:
                /* these are handled by the primary encoders */
                amdgpu_encoder->is_ext_encoder = true;
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_LVDS, NULL);
                else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_DAC, NULL);
                else
                        drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
                                         DRM_MODE_ENCODER_TMDS, NULL);
                drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
                break;
        }
}
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
        .set_vga_render_state = &dce_v11_0_set_vga_render_state,
        .bandwidth_update = &dce_v11_0_bandwidth_update,
        .vblank_get_counter = &dce_v11_0_vblank_get_counter,
        .vblank_wait = &dce_v11_0_vblank_wait,
        .is_display_hung = &dce_v11_0_is_display_hung,
        .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
        .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
        .hpd_sense = &dce_v11_0_hpd_sense,
        .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
        .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
        .page_flip = &dce_v11_0_page_flip,
        .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
        .add_encoder = &dce_v11_0_encoder_add,
        .add_connector = &amdgpu_connector_add,
        .stop_mc_access = &dce_v11_0_stop_mc_access,
        .resume_mc_access = &dce_v11_0_resume_mc_access,
};

static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
{
        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dce_v11_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
        .set = dce_v11_0_set_crtc_irq_state,
        .process = dce_v11_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
        .set = dce_v11_0_set_pageflip_irq_state,
        .process = dce_v11_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
        .set = dce_v11_0_set_hpd_irq_state,
        .process = dce_v11_0_hpd_irq,
};

static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
        adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;

        adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
        adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;

        adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
        adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
}