/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Eugeni Dodonov <[email protected]>
 *
 */

#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/skl_watermark.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "intel_mchbar_regs.h"
#include "intel_pm.h"
#include "vlv_sideband.h"

struct drm_i915_clock_gating_funcs {
	void (*init_clock_gating)(struct drm_i915_private *i915);
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (HAS_LLC(dev_priv)) {
		/*
		 * WaCompressedResourceDisplayNewHashMode:skl,kbl
		 * Display WA #0390: skl,kbl
		 *
		 * Must match Sampler, Pixel Back End, and Media. See
		 * WaCompressedResourceSamplerPbeMediaNewHashMode.
		 */
		intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
				   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
				   SKL_DE_COMPRESSED_HASH_MODE);
	}

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
			   intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/*
	 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl
	 * Display WA #0859: skl,bxt,kbl,glk,cfl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_MEMORY_WAKE);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/*
	 * Lower the display internal timeout.
	 * This is needed to avoid any hard hangs when the DSI port PLL
	 * is off and an MMIO access is attempted by any privileged
	 * application, using batch buffers or any other means.
	 */
	intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950));

	/*
	 * WaFbcTurnOffFbcWatermark:bxt
	 * Display WA #0562: bxt
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcHighMemBwCorruptionAvoidance:bxt
	 * Display WA #0883: bxt
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
			   DPFC_DISABLE_DUMMY0);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void ilk_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
	csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
			ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n",
			csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
							 bool is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		drm_err(&dev_priv->drm,
			"timed out waiting for Punit DDR DVFS request\n");

	vlv_punit_put(dev_priv);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	vlv_punit_get(dev_priv);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);

	vlv_punit_put(dev_priv);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
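
/*
 * Illustration (not part of the original file): FW_WM() positions a
 * watermark value in its DSPFW register field via token pasting, e.g.
 * FW_WM(wm, SR) expands to
 *
 *	((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK
 *
 * so any bits of the value that don't fit in the SR field are masked
 * off instead of corrupting the neighbouring fields.
 */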

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		intel_uncore_write(&dev_priv->uncore, DSPFW3, val);
		intel_uncore_posting_read(&dev_priv->uncore, DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
		intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		intel_uncore_write(&dev_priv->uncore, INSTPM, val);
		intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
		    str_enabled_disabled(enable),
		    str_enabled_disabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->display.wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->display.wm.wm_mutex);

	return ret;
}
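
/*
 * Illustrative call pattern only (simplified; the vblank-wait helper is
 * a stand-in name, not defined in this file). Per the comment above,
 * CxSR must be disallowed before plane registers are written, and the
 * write must not land in the same frame:
 *
 *	if (intel_set_memory_cxsr(dev_priv, false))	// returns old state
 *		intel_crtc_wait_for_next_vblank(crtc);	// assumed helper
 *
 *	... write plane registers ...
 *
 *	intel_set_memory_cxsr(dev_priv, true);
 */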

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
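
/*
 * Worked example (hypothetical register values): each FIFO split point
 * is a 9-bit value (0-511 cachelines), with the low 8 bits taken from
 * DSPARB/DSPARB3 and the 9th bit from DSPARB2. With dsparb = 0x00004000
 * and dsparb2 = 0x00000010:
 *
 *	VLV_FIFO_START(dsparb, dsparb2, 8, 4)
 *	= ((0x4000 >> 8) & 0xff) | (((0x10 >> 4) & 0x1) << 8)
 *	= 0x40 | 0x100 = 320
 */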

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;
	u32 dsparb, dsparb2, dsparb3;

	switch (pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
		dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (i9xx_plane == PLANE_B)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
			      enum i9xx_plane_id i9xx_plane)
{
	u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
		    dsparb, plane_name(i9xx_plane), size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pnv_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM / 2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 1 or "small buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the short term drain rate
 * of the FIFO, ie. it does not account for blanking periods
 * which would effectively reduce the average drain rate across
 * a longer period. The name "small" refers to the fact the
 * FIFO is relatively small compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	u64 ret;

	ret = mul_u32_u32(pixel_rate, cpp * latency);
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}
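
/*
 * Worked example (illustrative numbers, not from the source): with
 * pixel_rate = 148500 kHz, cpp = 4 and latency = 50 (5.0 us in 0.1 us
 * units):
 *
 *	148500 * 4 * 50 / 10000 = 2970 bytes
 *
 * The divisor of 10000 folds the unit conversions together: kHz ->
 * pixels/s multiplies by 1000 and 0.1 us -> s multiplies by 1e-7, so
 * the product is scaled by 1e-4 overall.
 */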

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}
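
/*
 * Worked example (illustrative numbers): with latency = 50 (5.0 us),
 * pixel_rate = 148500 kHz, htotal = 2200, width = 1920 and cpp = 4:
 *
 *	(50 * 148500) / (2200 * 10000) = 0 complete scanlines elapse
 *	during the latency, so (0 + 1) * 1920 * 4 = 7680 bytes
 *
 * i.e. the watermark is the number of scanlines (rounded up to at
 * least one) that can pass during the wakeup latency, times the bytes
 * fetched per line.
 */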

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * Clocks go from a few thousand to several hundred thousand.
	 * Latency is usually a few thousand.
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
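
/*
 * Worked example (hypothetical parameters): with pixel_rate = 148500,
 * cpp = 4, latency_ns = 5000, a 64 byte cacheline, guard_size = 2 and
 * fifo_size = 512:
 *
 *	method1: 148500 * 4 * 50 / 10000 = 2970 bytes
 *	entries: DIV_ROUND_UP(2970, 64) + 2 = 47 + 2 = 49
 *	wm_size: 512 - 49 = 463
 *
 * which is then subject to the max_wm/default_wm clamps and the
 * minimum of 8 above.
 */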

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->display.wm.max_level + 1;
}

bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

static bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->state->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc && crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pnv_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int cpp = fb->format->cpp[0];

		/* Display SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}
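
/*
 * Worked example (illustrative numbers): with fifo_size = 511
 * cachelines (511 * 64 = 32704 bytes), a 1920 pixel wide plane at
 * cpp = 4 needs 1920 * 4 * 8 = 61440 bytes for eight lines, so
 * tlb_miss is negative and no adjustment is made. A 640 pixel wide
 * plane at cpp = 2 needs only 640 * 2 * 8 = 10240 bytes, so the
 * watermark is bumped by 32704 - 10240 = 22464 bytes.
 */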
  828. static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
  829. const struct g4x_wm_values *wm)
  830. {
  831. enum pipe pipe;
  832. for_each_pipe(dev_priv, pipe)
  833. trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);
  834. intel_uncore_write(&dev_priv->uncore, DSPFW1,
  835. FW_WM(wm->sr.plane, SR) |
  836. FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
  837. FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
  838. FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
  839. intel_uncore_write(&dev_priv->uncore, DSPFW2,
  840. (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
  841. FW_WM(wm->sr.fbc, FBC_SR) |
  842. FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
  843. FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
  844. FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
  845. FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
  846. intel_uncore_write(&dev_priv->uncore, DSPFW3,
  847. (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
  848. FW_WM(wm->sr.cursor, CURSOR_SR) |
  849. FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
  850. FW_WM(wm->hpll.plane, HPLL_SR));
  851. intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
  852. }

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);

		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
				   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
				   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
				   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
				   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);

	intel_uncore_write(&dev_priv->uncore, DSPFW1,
			   FW_WM(wm->sr.plane, SR) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2,
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW3,
			   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
				   FW_WM(wm->sr.plane >> 9, SR_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		intel_uncore_write(&dev_priv->uncore, DSPFW7,
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
				   FW_WM(wm->sr.plane >> 9, SR_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}
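
/*
 * Compute the watermark for a single plane at the given level.
 * The stored latencies are in usec while the method1/method2
 * helpers want 0.1us units, hence the *10. The result is
 * converted to cachelines (plus what looks like a two cacheline
 * guard band) before being returned.
 */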

static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
	unsigned int pixel_rate, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * WaUse32BppForSRWM:ctg,elk
	 *
	 * The spec fails to list this restriction for the
	 * HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	pixel_rate = crtc_state->pixel_rate;
	htotal = pipe_mode->crtc_htotal;
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(pixel_rate, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(pixel_rate, cpp, latency);
		large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}
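
/*
 * Starting from 'level' set all higher levels to 'value' in the
 * "raw" watermarks (mirrors the VLV helper further down).
 */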
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level > dev_priv->display.wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
			       int level)
{
	if (level < G4X_WM_LEVEL_SR)
		return false;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		return false;

	if (level >= G4X_WM_LEVEL_HPLL &&
	    wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		return false;

	return true;
}
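
/*
 * Fill crtc_state->wm.g4x.optimal from the "raw" watermarks:
 * each level (NORMAL -> SR -> HPLL) is taken into use only as
 * long as the raw values still fit the FIFO at that level.
 */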
static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	const struct g4x_pipe_wm *raw;
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. If
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely. 'level-1' is the highest valid
	 * level here.
	 */
	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);

	return 0;
}
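
/*
 * The intermediate watermarks have to be safe for both the old and
 * the new plane configuration, hence the max() of the two optimal
 * states below, and cxsr/hpll/fbc stay enabled only if both states
 * allowed them.
 */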
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
			    g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		    intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		    intermediate->hpll_en);

	drm_WARN_ON(&dev_priv->drm,
		    intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		    intermediate->fbc_en && intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		    intermediate->fbc_en && intermediate->hpll_en);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
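
/*
 * Merge the per-crtc active watermarks into the global register
 * values. cxsr/hpll/fbc survive only if every active crtc allows
 * them, and only with exactly one active pipe.
 */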
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_pipes++;
	}

	if (num_active_pipes != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}
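
/*
 * Note the ordering: cxsr is disabled before the new watermarks are
 * written and only re-enabled afterwards, so self refresh never runs
 * with stale watermark values.
 */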
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state,
				int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	unsigned int pixel_rate, htotal, cpp, width, wm;

	if (dev_priv->display.wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];
	pixel_rate = crtc_state->pixel_rate;
	htotal = pipe_mode->crtc_htotal;
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
				    dev_priv->display.wm.pri_latency[level] * 10);
	}

	return min_t(unsigned int, wm, USHRT_MAX);
}

static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}
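
/*
 * Split the 511 cacheline DSPARB FIFO between the active
 * primary/sprite planes in proportion to their raw PM2 watermarks.
 * The cursor has its own fixed 63 entry FIFO.
 */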
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}

/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}
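
/*
 * The raw watermarks are FIFO usage, whereas the values programmed
 * into the registers are apparently the remaining headroom, hence
 * the inversion against the FIFO size below.
 */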
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			    plane->base.name,
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}
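
/*
 * Like the g4x variant, but this also redistributes the DSPARB FIFO
 * (via vlv_compute_fifo()) whenever a non-cursor plane changed, and
 * converts the raw watermarks into the inverted form used by the
 * registers.
 */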
static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active.
	 */
	if (needs_modeset)
		crtc_state->fifo_changed = true;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (needs_modeset ||
		    memcmp(old_fifo_state, fifo_state,
			   sizeof(*fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	/* initially allow all levels */
	wm_state->num_levels = intel_wm_num_levels(dev_priv);
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;
	u32 dsparb, dsparb2, dsparb3;

	if (!crtc_state->fifo_changed)
		return;

	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}

#undef VLV_FIFO

static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) {
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
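
/*
 * Note the min() above vs. the max() in the g4x version: the VLV
 * watermark values are stored inverted (as FIFO headroom), so the
 * smaller value is the safer one during the transition. The merge
 * below then picks the deepest level that all pipes can support.
 */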
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->level = dev_priv->display.wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_pipes++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_pipes != 1)
		wm->cxsr = false;

	if (num_active_pipes > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}
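
/*
 * Ordering matters here too: the deeper power states (DDR DVFS,
 * PM5, cxsr) are dropped before the new watermarks are written,
 * and only re-enabled once they are in place.
 */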
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}

static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

static void i965_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int htotal = pipe_mode->crtc_htotal;
		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(pixel_rate, htotal,
					   width, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",
			    entries, srwm);

		entries = intel_wm_method2(pixel_rate, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		    srwm);

	/* 965 has limitations... */
	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
			   FW_WM(8, CURSORB) |
			   FW_WM(8, PLANEB) |
			   FW_WM(8, PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
			   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
					       enum i9xx_plane_id i9xx_plane)
{
	struct intel_plane *plane;

	for_each_intel_plane(&i915->drm, plane) {
		if (plane->id == PLANE_PRIMARY &&
		    plane->i9xx_plane == i9xx_plane)
			return intel_crtc_for_pipe(i915, plane->pipe);
	}

	return NULL;
}

static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
	const struct intel_watermark_params *wm_info;
	u32 fwater_lo;
	u32 fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (DISPLAY_VER(dev_priv) != 2)
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (DISPLAY_VER(dev_priv) == 2)
		wm_info = &i830_bc_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	crtc = single_enabled_crtc(dev_priv);
	if (IS_I915GM(dev_priv) && crtc) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(crtc->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			crtc = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int htotal = pipe_mode->crtc_htotal;
		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
					   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);

	if (crtc)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	u32 fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
				       &i845_wm_info,
				       i845_get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method1(pixel_rate, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

struct ilk_wm_maximums {
	u16 pri;
	u16 spr;
	u16 cur;
	u16 fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.src) >> 16,
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	u32 method1, method2;
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.src) >> 16,
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.pipe_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.src) >> 16,
			      cpp, mem_value);
}

/* Only for WM_LP. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
			  cpp);
}

static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return 3072;
	else if (DISPLAY_VER(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (DISPLAY_VER(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (DISPLAY_VER(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_NUM_PIPES(dev_priv);

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (DISPLAY_VER(dev_priv) <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev_priv, level);
}

static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev_priv, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
  2292. static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
  2293. int level,
  2294. struct ilk_wm_maximums *max)
  2295. {
  2296. max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
  2297. max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
  2298. max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
  2299. max->fbc = ilk_fbc_wm_reg_max(dev_priv);
  2300. }
  2301. static bool ilk_validate_wm_level(int level,
  2302. const struct ilk_wm_maximums *max,
  2303. struct intel_wm_level *result)
  2304. {
  2305. bool ret;
  2306. /* already determined to be invalid? */
  2307. if (!result->enable)
  2308. return false;
  2309. result->enable = result->pri_val <= max->pri &&
  2310. result->spr_val <= max->spr &&
  2311. result->cur_val <= max->cur;
  2312. ret = result->enable;
  2313. /*
  2314. * HACK until we can pre-compute everything,
  2315. * and thus fail gracefully if LP0 watermarks
  2316. * are exceeded...
  2317. */
  2318. if (level == 0 && !result->enable) {
  2319. if (result->pri_val > max->pri)
  2320. DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
  2321. level, result->pri_val, max->pri);
  2322. if (result->spr_val > max->spr)
  2323. DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
  2324. level, result->spr_val, max->spr);
  2325. if (result->cur_val > max->cur)
  2326. DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
  2327. level, result->cur_val, max->cur);
  2328. result->pri_val = min_t(u32, result->pri_val, max->pri);
  2329. result->spr_val = min_t(u32, result->spr_val, max->spr);
  2330. result->cur_val = min_t(u32, result->cur_val, max->cur);
  2331. result->enable = true;
  2332. }
  2333. return ret;
  2334. }
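
/*
 * Note: the ilk_compute_*_wm() helpers take latencies in 0.1 us units
 * (WM0 values are already stored that way, e.g. ILK's 700 ns LP0
 * latency is stored as 7), so the 0.5 us WM1+ register values are
 * multiplied by 5 before use.
 */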
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 const struct intel_crtc *crtc,
				 int level,
				 struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *pristate,
				 const struct intel_plane_state *sprstate,
				 const struct intel_plane_state *curstate,
				 struct intel_wm_level *result)
{
	u16 pri_latency = dev_priv->display.wm.pri_latency[level];
	u16 spr_latency = dev_priv->display.wm.spr_latency[level];
	u16 cur_latency = dev_priv->display.wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	if (pristate) {
		result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
						     pri_latency, level);
		result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
	}

	if (sprstate)
		result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);

	if (curstate)
		result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);

	result->enable = true;
}
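
/*
 * On HSW/BDW the memory latency values live in the 64 bit MCH_SSKPD
 * register; WM0 has both a legacy and a new field, and the new one is
 * preferred whenever it is non-zero.
 */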
static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	u64 sskpd;

	sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);

	wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
	if (wm[0] == 0)
		wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
	wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
	wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
	wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
	wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
}

static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	u32 sskpd;

	sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);

	wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
	wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
	wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
	wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
}

static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	u32 mltr;

	mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);

	/* ILK primary LP0 latency is 700 ns */
	wm[0] = 7;
	wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
	wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}

static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}

int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
{
	/* how many WM levels are we expecting */
	if (HAS_HW_SAGV_WM(dev_priv))
		return 5;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return 7;
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 4;
	else if (DISPLAY_VER(dev_priv) >= 6)
		return 3;
	else
		return 2;
}

void intel_print_wm_latency(struct drm_i915_private *dev_priv,
			    const char *name, const u16 wm[])
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
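
/*
 * Raise WM0 to at least @min (0.1 us units); WM1+ levels get the same
 * floor converted to their 0.5 us units via DIV_ROUND_UP(min, 5).
 * Returns true if WM0 actually had to be bumped.
 */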
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    u16 wm[5], u16 min)
{
	int level, max_level = ilk_wm_max_level(dev_priv);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS-provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);

	if (!changed)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}
static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * It's not clear whether other interrupt sources could
	 * be affected or if this is somehow limited to vblank
	 * interrupts only. To play it safe we disable LP3
	 * watermarks entirely.
	 */
	if (dev_priv->display.wm.pri_latency[3] == 0 &&
	    dev_priv->display.wm.spr_latency[3] == 0 &&
	    dev_priv->display.wm.cur_latency[3] == 0)
		return;

	dev_priv->display.wm.pri_latency[3] = 0;
	dev_priv->display.wm.spr_latency[3] = 0;
	dev_priv->display.wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
	else if (DISPLAY_VER(dev_priv) >= 6)
		snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
	else
		ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);

	memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
	       sizeof(dev_priv->display.wm.pri_latency));
	memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
	       sizeof(dev_priv->display.wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);

	if (DISPLAY_VER(dev_priv) == 6) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}
static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
		return false;
	}

	return true;
}
/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;
	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}
/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state. These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
	const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
	int level, max_level = ilk_wm_max_level(dev_priv);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = new_crtc_state->wm.ilk.optimal;
	if (!new_crtc_state->hw.active ||
	    drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi) ||
	    state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves. If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
	    dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
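
/*
 * E.g. with 5 levels (HSW/BDW), if wm[4] is enabled the LP1/LP2/LP3
 * registers hold levels 1/3/4; otherwise they hold levels 1/2/3.
 */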
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
				      int level)
{
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->display.wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
			WM_LP_PRIMARY(r->pri_val) |
			WM_LP_CURSOR(r->cur_val);

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;

		if (DISPLAY_VER(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
		else
			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);

		results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);

		/*
		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
		}
	}

	/* LP0 register values */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum pipe pipe = crtc->pipe;
		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
		const struct intel_wm_level *r = &pipe_wm->wm[0];

		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
			continue;

		results->wm_pipe[pipe] =
			WM0_PIPE_PRIMARY(r->pri_val) |
			WM0_PIPE_SPRITE(r->spr_val) |
			WM0_PIPE_CURSOR(r->cur_val);
	}
}
/*
 * Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same.
 */
static struct intel_pipe_wm *
ilk_find_best_result(struct drm_i915_private *dev_priv,
		     struct intel_pipe_wm *r1,
		     struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev_priv);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
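
/*
 * The layout above puts one bit per pipe in the low bits, LP1-LP3 at
 * bits 16-18, FBC at bit 24 and the DDB partitioning at bit 25.
 */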
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
		previous->wm_lp[2] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
		previous->wm_lp[1] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
		previous->wm_lp[0] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM_LP_SPRITE_ENABLE here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need to, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
	unsigned int dirty;
	u32 val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
			val = intel_uncore_read(&dev_priv->uncore, WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			intel_uncore_write(&dev_priv->uncore, WM_MISC, val);
		} else {
			val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (DISPLAY_VER(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->display.wm.hw = *results;
}
bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (DISPLAY_VER(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
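
/*
 * Two-stage update: the intermediate watermarks below are programmed
 * before the vblank (safe for both the old and new plane state), and
 * the optimal ones after it, but only when they actually differ.
 */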
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;

	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
	} else {
		int level, max_level = ilk_wm_max_level(dev_priv);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}
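
/* Helpers to pull individual watermark fields out of the DSPFW registers. */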
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)

static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}
static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
			       struct vlv_wm_values *wm)
{
	enum pipe pipe;
	u32 tmp;

	for_each_pipe(dev_priv, pipe) {
		tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));

		wm->ddl[pipe].plane[PLANE_PRIMARY] =
			(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_CURSOR] =
			(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE0] =
			(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
		wm->ddl[pipe].plane[PLANE_SPRITE1] =
			(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
	}

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

	if (IS_CHERRYVIEW(dev_priv)) {
		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
		wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
		wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	} else {
		tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);

		tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
		wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
		wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
		wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
	}
}

#undef _FW_WM
#undef _FW_WM_VLV
void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
	struct intel_crtc *crtc;

	g4x_read_wm_values(dev_priv, wm);

	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct g4x_wm_state *active = &crtc->wm.active.g4x;
		struct g4x_pipe_wm *raw;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level, max_level;

		active->cxsr = wm->cxsr;
		active->hpll_en = wm->hpll_en;
		active->fbc_en = wm->fbc_en;

		active->sr = wm->sr;
		active->hpll = wm->hpll;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			active->wm.plane[plane_id] =
				wm->pipe[pipe].plane[plane_id];
		}

		if (wm->cxsr && wm->hpll_en)
			max_level = G4X_WM_LEVEL_HPLL;
		else if (wm->cxsr)
			max_level = G4X_WM_LEVEL_SR;
		else
			max_level = G4X_WM_LEVEL_NORMAL;

		level = G4X_WM_LEVEL_NORMAL;
		raw = &crtc_state->wm.g4x.raw[level];
		for_each_plane_id_on_crtc(crtc, plane_id)
			raw->plane[plane_id] = active->wm.plane[plane_id];

		level = G4X_WM_LEVEL_SR;
		if (level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->sr.plane;
		raw->plane[PLANE_CURSOR] = active->sr.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->sr.fbc;

		level = G4X_WM_LEVEL_HPLL;
		if (level > max_level)
			goto out;

		raw = &crtc_state->wm.g4x.raw[level];
		raw->plane[PLANE_PRIMARY] = active->hpll.plane;
		raw->plane[PLANE_CURSOR] = active->hpll.cursor;
		raw->plane[PLANE_SPRITE0] = 0;
		raw->fbc = active->hpll.fbc;

		level++;
	out:
		for_each_plane_id_on_crtc(crtc, plane_id)
			g4x_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

		crtc_state->wm.g4x.optimal = *active;
		crtc_state->wm.g4x.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
	drm_dbg_kms(&dev_priv->drm,
		    "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
		    wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
	drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
		    str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
		    str_yes_no(wm->fbc_en));
}
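
/*
 * Zero out the watermarks of planes that are not visible, so the
 * software tracking matches what the watermark code would compute
 * for the current plane state.
 */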
void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->display.wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < 3; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.g4x.raw[level];

			raw->plane[plane_id] = 0;
			wm_state->wm.plane[plane_id] = 0;
		}

		if (plane_id == PLANE_PRIMARY) {
			for (level = 0; level < 3; level++) {
				struct g4x_pipe_wm *raw =
					&crtc_state->wm.g4x.raw[level];
				raw->fbc = 0;
			}

			wm_state->sr.fbc = 0;
			wm_state->hpll.fbc = 0;
			wm_state->fbc_en = false;
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.g4x.intermediate =
			crtc_state->wm.g4x.optimal;
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	}

	g4x_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
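
/*
 * Determine the deepest supported power saving level: PM2 always, PM5
 * when maxfifo PM5 is enabled, and DDR DVFS only when the Punit
 * actually acks frequency change requests.
 */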
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
	struct intel_crtc *crtc;
	u32 val;

	vlv_read_wm_values(dev_priv, wm);

	wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
	wm->level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		vlv_punit_get(dev_priv);

		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
		if (val & DSP_MAXFIFO_PM5_ENABLE)
			wm->level = VLV_WM_LEVEL_PM5;

		/*
		 * If DDR DVFS is disabled in the BIOS, Punit
		 * will never ack the request. So if that happens
		 * assume we don't have to enable/disable DDR DVFS
		 * dynamically. To test that just set the REQ_ACK
		 * bit to poke the Punit, but don't change the
		 * HIGH/LOW bits so that we don't actually change
		 * the current state.
		 */
		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
		val |= FORCE_DDR_FREQ_REQ_ACK;
		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
			drm_dbg_kms(&dev_priv->drm,
				    "Punit not acking DDR DVFS request, "
				    "assuming DDR DVFS is disabled\n");
			dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5;
		} else {
			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
				wm->level = VLV_WM_LEVEL_DDR_DVFS;
		}

		vlv_punit_put(dev_priv);
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct vlv_wm_state *active = &crtc->wm.active.vlv;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;
		int level;

		vlv_get_fifo_size(crtc_state);

		active->num_levels = wm->level + 1;
		active->cxsr = wm->cxsr;

		for (level = 0; level < active->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			active->sr[level].plane = wm->sr.plane;
			active->sr[level].cursor = wm->sr.cursor;

			for_each_plane_id_on_crtc(crtc, plane_id) {
				active->wm[level].plane[plane_id] =
					wm->pipe[pipe].plane[plane_id];

				raw->plane[plane_id] =
					vlv_invert_wm_value(active->wm[level].plane[plane_id],
							    fifo_state->plane[plane_id]);
			}
		}

		for_each_plane_id_on_crtc(crtc, plane_id)
			vlv_raw_plane_wm_set(crtc_state, level,
					     plane_id, USHRT_MAX);
		vlv_invalidate_wms(crtc, active, level);

		crtc_state->wm.vlv.optimal = *active;
		crtc_state->wm.vlv.intermediate = *active;

		drm_dbg_kms(&dev_priv->drm,
			    "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
			    pipe_name(pipe),
			    wm->pipe[pipe].plane[PLANE_PRIMARY],
			    wm->pipe[pipe].plane[PLANE_CURSOR],
			    wm->pipe[pipe].plane[PLANE_SPRITE0],
			    wm->pipe[pipe].plane[PLANE_SPRITE1]);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
		    wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	mutex_lock(&dev_priv->display.wm.wm_mutex);

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_crtc *crtc =
			intel_crtc_for_pipe(dev_priv, plane->pipe);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
		const struct vlv_fifo_state *fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		enum plane_id plane_id = plane->id;
		int level;

		if (plane_state->uapi.visible)
			continue;

		for (level = 0; level < wm_state->num_levels; level++) {
			struct g4x_pipe_wm *raw =
				&crtc_state->wm.vlv.raw[level];

			raw->plane[plane_id] = 0;

			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		crtc_state->wm.vlv.intermediate =
			crtc_state->wm.vlv.optimal;
		crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	}

	vlv_program_watermarks(dev_priv);

	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}
/*
 * FIXME should probably kill this and improve
 * the real watermark readout/sanitation instead
 */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK,
			   intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK,
			   intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK,
			   intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);

	/*
	 * Don't touch WM_LP_SPRITE_ENABLE here.
	 * Doing so could cause underruns.
	 */
}

void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
	struct intel_crtc *crtc;

	ilk_init_lp_watermarks(dev_priv);

	for_each_intel_crtc(&dev_priv->drm, crtc)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
	hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
	hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);

	hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
	if (DISPLAY_VER(dev_priv) >= 7) {
		hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
		hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
				    WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev_priv))
		hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
				    DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
			   PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
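
/*
 * Trickle feed is disabled per pipe; rewriting DSPSURF with its
 * current value kicks the double buffered plane registers so that
 * the DSPCNTR change actually takes effect.
 */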
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe),
				   intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) |
				   DISP_TRICKLE_FEED_DISABLE);

		intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe),
				   intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe)));
		intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe));
	}
}
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0,
			   MARIUNIT_CLOCK_GATE_DISABLE |
			   SVSMUNIT_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1,
			   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh:
	 * bit 22/21 of 0x42004
	 * bit 5 of 0x42020
	 * bit 15 of 0x45000
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
			   (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
			    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
			   (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			    DISP_FBC_WM_DIS));

	/*
	 * According to the hardware documentation, the following bits
	 * should be set unconditionally in order to enable FBC:
	 * bit 22 of 0x42000
	 * bit 22 of 0x42004
	 * bits 7, 8 and 9 of 0x42020
	 */
	if (IS_IRONLAKE_M(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
				   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
				   ILK_FBCQ_DIS);
		intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
				   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
				   ILK_DPARB_GATE);
	}

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);

	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
			   ILK_ELPIN_409_SELECT);

	g4x_disable_trickle_feed(dev_priv);

	ibx_init_clock_gating(dev_priv);
}
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
			   PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
			   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
			   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2,
			   intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) |
			   DPLS_EDP_PPS_FIX_DIS);
	/*
	 * The below fixes a weird display corruption (a few pixels
	 * shifted downward) seen only on the LVDS panels of some HP
	 * laptops with Ivybridge.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->display.vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe),
				   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
	if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
		drm_dbg_kms(&dev_priv->drm,
			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			    tmp);
}
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate);

	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
			   ILK_ELPIN_409_SELECT);

	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
			   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
			   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
			   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
			   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
			   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * bit 21 and bit 22 of 0x42000
	 * bit 21 and bit 22 of 0x42004
	 * bit 5 and bit 7 of 0x42020
	 * bit 14 of 0x70180
	 * bit 14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D,
			   intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) |
			   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
			   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev_priv);

	cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}
  3579. static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
  3580. {
  3581. /*
  3582. * TODO: this bit should only be enabled when really needed, then
  3583. * disabled when not needed anymore in order to save power.
  3584. */
  3585. if (HAS_PCH_LPT_LP(dev_priv))
  3586. intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
  3587. intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
  3588. PCH_LP_PARTITION_LEVEL_DISABLE);
  3589. /* WADPOClockGatingDisable:hsw */
  3590. intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A),
  3591. intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) |
  3592. TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
  3593. }
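
/*
 * Counterpart to lpt_init_clock_gating(): clear
 * PCH_LP_PARTITION_LEVEL_DISABLE again before suspend, presumably so
 * partition-level power management on LPT:LP is not blocked while the
 * device is suspended.
 */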
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT_LP(dev_priv)) {
		u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val);
	}
}
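
/*
 * Program the L3 SQC credit defaults (WaProgramL3SqcReg1Default). The
 * two credit counts are packed into GEN8_L3SQCREG1 by the
 * L3_GENERAL_PRIO_CREDITS()/L3_HIGH_PRIO_CREDITS() macros; callers in
 * this file use (30, 2) on BDW and (38, 2) on CHV.
 */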
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;
	u32 val;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL,
			   misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1);
	val &= ~L3_PRIO_CREDITS_MASK;
	val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
	val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
	intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val);

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1);
	udelay(1);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}

static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_1409120013:icl,ehl */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* Wa_14010594013:icl,ehl */
	intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
			 0, ICL_DELAY_PMRSP);
}

static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_1409120013 */
	if (DISPLAY_VER(dev_priv) == 12)
		intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
				   DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	/* Wa_1409825376:tgl (pre-prod) */
	if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3,
				   intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
				   TGL_VRH_GATING_DIS);

	/* Wa_14013723622:tgl,rkl,dg1,adl-s */
	if (DISPLAY_VER(dev_priv) == 12)
		intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
				 CLKREQ_POLICY_MEM_UP_OVRD, 0);
}

static void adlp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* Wa_22011091694:adlp */
	intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS);

	/* Bspec/49189 Initialize Sequence */
	intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}

static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen12lp_init_clock_gating(dev_priv);

	/* Wa_1409836686:dg1[a0] */
	if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3,
				   intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
				   DPT_GATING_DIS);
}

static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_22010146351:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);
}

static void dg2_init_clock_gating(struct drm_i915_private *i915)
{
	/* Wa_22010954014:dg2 */
	intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
			 SGSI_SIDECLK_DIS);

	/*
	 * Wa_14010733611:dg2_g10
	 * Wa_22010146351:dg2_g10
	 */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
		intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0,
				 SGR_DIS | SGGI_DIS);
}

static void pvc_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* Wa_14012385139:pvc */
	if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS);

	/* Wa_22010954014:pvc */
	if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0))
		intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}

static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
	if (!HAS_PCH_CNP(dev_priv))
		return;

	/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
	intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D,
			   intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) |
			   CNP_PWM_CGE_GATING_DISABLE);
}

static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	cnp_init_clock_gating(dev_priv);
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:cfl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL,
			   intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/*
	 * WaFbcTurnOffFbcWatermark:cfl
	 * Display WA #0562: cfl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
			   intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:cfl
	 * Display WA #0873: cfl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WAC6entrylatency:kbl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL,
			   intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6,
				   intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
				   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0))
		intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
				   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
				   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaFbcTurnOffFbcWatermark:kbl
	 * Display WA #0562: kbl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
			   intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:kbl
	 * Display WA #0873: kbl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);
}

static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:skl */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL,
			   intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) &
			   ~GEN7_DOP_CLOCK_GATE_ENABLE);

	/* WAC6entrylatency:skl */
	intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL,
			   intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) |
			   FBC_LLC_FULLY_OPEN);

	/*
	 * WaFbcTurnOffFbcWatermark:skl
	 * Display WA #0562: skl
	 */
	intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL,
			   intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
			   DISP_FBC_WM_DIS);

	/*
	 * WaFbcNukeOnHostModify:skl
	 * Display WA #0873: skl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
			   DPFC_NUKE_ON_ANY_MODIFICATION);

	/*
	 * WaFbcHighMemBwCorruptionAvoidance:skl
	 * Display WA #0883: skl
	 */
	intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A),
			   intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) |
			   DPFC_DISABLE_DUMMY0);
}

static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
			   HSW_FBCQ_DIS);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK,
			   intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) |
			   HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1,
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) |
			   DPA_MASK_VBLANK_SRD);

	for_each_pipe(dev_priv, pipe) {
		/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
		intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
				   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
				   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
			   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
			   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
			   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6,
			   intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/* WaKVMNotificationOnConfigChange:bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1,
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1) |
			   KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev_priv);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
	 * clock gating.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
			   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
			   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
	intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
			   intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
			   HSW_FBCQ_DIS);

	/* This is required by WaCatErrorRejectionIssue:hsw */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK,
			   intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) |
			   HSW_ECOCHK_ARB_PRIO_SOL);

	lpt_init_clock_gating(dev_priv);
}

static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 snpcr;

	intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcAsynchFlipDisableFbcQueue:ivb */
	intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1,
			   intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);

	/* WaDisableBackToBackFlipFix:ivb */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
			   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
			   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	if (IS_IVB_GT1(dev_priv))
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2,
				   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
			   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev_priv);

	snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev_priv))
		cpt_init_clock_gating(dev_priv);

	gen6_check_mch_setup(dev_priv);
}

static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaDisableBackToBackFlipFix:vlv */
	intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3,
			   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
			   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisableDopClockGating:vlv */
	intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			   intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2,
			   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating - MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING
	 */
	intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4,
			   intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) |
			   GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE,
			   intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) &
			   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
			   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1,
			   intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
			   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6,
			   intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);
}

static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dspclk_gate;

	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0);
	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2,
			   VF_UNIT_CLOCK_GATE_DISABLE |
			   GS_UNIT_CLOCK_GATE_DISABLE |
			   CL_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		      OVRUNIT_CLOCK_GATE_DISABLE |
		      OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev_priv))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate);

	g4x_disable_trickle_feed(dev_priv);
}

static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0);
	intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
	intel_uncore_write16(uncore, DEUC, 0);
	intel_uncore_write(uncore,
			   MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1,
			   I965_RCZ_CLOCK_GATE_DISABLE |
			   I965_RCC_CLOCK_GATE_DISABLE |
			   I965_RCPB_CLOCK_GATE_DISABLE |
			   I965_ISC_CLOCK_GATE_DISABLE |
			   I965_FBC_CLOCK_GATE_DISABLE);
	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0);
	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		  DSTATE_DOT_CLOCK_GATING;
	intel_uncore_write(&dev_priv->uncore, D_STATE, dstate);

	if (IS_PINEVIEW(dev_priv))
		intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
				   _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE),
			   _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	intel_uncore_write(&dev_priv->uncore, INSTPM,
			   _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE,
			   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}

static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	intel_uncore_write(&dev_priv->uncore, MI_STATE,
			   _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
			   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
			   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));

	/*
	 * Have FBC ignore 3D activity since we use software
	 * render tracking, and otherwise a pure 3D workload
	 * (even if it just renders a single frame and then does
	 * absolutely nothing) would not allow FBC to recompress
	 * until a 2D blit occurs.
	 */
	intel_uncore_write(&dev_priv->uncore, SCPD0,
			   _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
}

static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
	intel_uncore_write(&dev_priv->uncore, MEM_MODE,
			   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
			   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}

void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
	dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}

void intel_suspend_hw(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_LPT(dev_priv))
		lpt_suspend_hw(dev_priv);
}

static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
{
	drm_dbg_kms(&dev_priv->drm,
		    "No clock gating settings or workarounds applied.\n");
}

#define CG_FUNCS(platform)						\
static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
	.init_clock_gating = platform##_init_clock_gating,		\
}
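
/*
 * For example, CG_FUNCS(skl) expands to:
 *
 *	static const struct drm_i915_clock_gating_funcs skl_clock_gating_funcs = {
 *		.init_clock_gating = skl_init_clock_gating,
 *	};
 */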
CG_FUNCS(pvc);
CG_FUNCS(dg2);
CG_FUNCS(xehpsdv);
CG_FUNCS(adlp);
CG_FUNCS(dg1);
CG_FUNCS(gen12lp);
CG_FUNCS(icl);
CG_FUNCS(cfl);
CG_FUNCS(skl);
CG_FUNCS(kbl);
CG_FUNCS(bxt);
CG_FUNCS(glk);
CG_FUNCS(bdw);
CG_FUNCS(chv);
CG_FUNCS(hsw);
CG_FUNCS(ivb);
CG_FUNCS(vlv);
CG_FUNCS(gen6);
CG_FUNCS(ilk);
CG_FUNCS(g4x);
CG_FUNCS(i965gm);
CG_FUNCS(i965g);
CG_FUNCS(gen3);
CG_FUNCS(i85x);
CG_FUNCS(i830);
CG_FUNCS(nop);
#undef CG_FUNCS

/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_PONTEVECCHIO(dev_priv))
		dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs;
	else if (IS_DG2(dev_priv))
		dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs;
	else if (IS_XEHPSDV(dev_priv))
		dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs;
	else if (IS_ALDERLAKE_P(dev_priv))
		dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
	else if (IS_DG1(dev_priv))
		dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 12)
		dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 11)
		dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
	else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
	else if (IS_SKYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
	else if (IS_BROXTON(dev_priv))
		dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
	else if (IS_GEMINILAKE(dev_priv))
		dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
	else if (IS_HASWELL(dev_priv))
		dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 6)
		dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 5)
		dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
	else if (IS_G4X(dev_priv))
		dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
	else if (IS_I965GM(dev_priv))
		dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
	else if (IS_I965G(dev_priv))
		dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 3)
		dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
	else if (GRAPHICS_VER(dev_priv) == 2)
		dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
	}
}
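
/*
 * Per-platform watermark vtables; intel_init_pm() below picks the one
 * matching the running platform.
 */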
static const struct intel_wm_funcs ilk_wm_funcs = {
	.compute_pipe_wm = ilk_compute_pipe_wm,
	.compute_intermediate_wm = ilk_compute_intermediate_wm,
	.initial_watermarks = ilk_initial_watermarks,
	.optimize_watermarks = ilk_optimize_watermarks,
};

static const struct intel_wm_funcs vlv_wm_funcs = {
	.compute_pipe_wm = vlv_compute_pipe_wm,
	.compute_intermediate_wm = vlv_compute_intermediate_wm,
	.initial_watermarks = vlv_initial_watermarks,
	.optimize_watermarks = vlv_optimize_watermarks,
	.atomic_update_watermarks = vlv_atomic_update_fifo,
};

static const struct intel_wm_funcs g4x_wm_funcs = {
	.compute_pipe_wm = g4x_compute_pipe_wm,
	.compute_intermediate_wm = g4x_compute_intermediate_wm,
	.initial_watermarks = g4x_initial_watermarks,
	.optimize_watermarks = g4x_optimize_watermarks,
};

static const struct intel_wm_funcs pnv_wm_funcs = {
	.update_wm = pnv_update_wm,
};

static const struct intel_wm_funcs i965_wm_funcs = {
	.update_wm = i965_update_wm,
};

static const struct intel_wm_funcs i9xx_wm_funcs = {
	.update_wm = i9xx_update_wm,
};

static const struct intel_wm_funcs i845_wm_funcs = {
	.update_wm = i845_update_wm,
};
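
/*
 * Empty vtable for platforms where the watermark latencies could not
 * be read or no known platform matched; see intel_init_pm() below.
 */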
static const struct intel_wm_funcs nop_funcs = {
};

/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_init(dev_priv);
		return;
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev_priv))
		pnv_get_mem_freq(dev_priv);
	else if (GRAPHICS_VER(dev_priv) == 5)
		ilk_get_mem_freq(dev_priv);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_setup_wm_latency(dev_priv);

		if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] &&
		     dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) ||
		    (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] &&
		     dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) {
			dev_priv->display.funcs.wm = &ilk_wm_funcs;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to read display plane latency. "
				    "Disable CxSR\n");
			dev_priv->display.funcs.wm = &nop_funcs;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_setup_wm_latency(dev_priv);
		dev_priv->display.funcs.wm = &vlv_wm_funcs;
	} else if (IS_G4X(dev_priv)) {
		g4x_setup_wm_latency(dev_priv);
		dev_priv->display.funcs.wm = &g4x_wm_funcs;
	} else if (IS_PINEVIEW(dev_priv)) {
		if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			drm_info(&dev_priv->drm,
				 "failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.funcs.wm = &nop_funcs;
		} else
			dev_priv->display.funcs.wm = &pnv_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 4) {
		dev_priv->display.funcs.wm = &i965_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 3) {
		dev_priv->display.funcs.wm = &i9xx_wm_funcs;
	} else if (DISPLAY_VER(dev_priv) == 2) {
		if (INTEL_NUM_PIPES(dev_priv) == 1)
			dev_priv->display.funcs.wm = &i845_wm_funcs;
		else
			dev_priv->display.funcs.wm = &i9xx_wm_funcs;
	} else {
		drm_err(&dev_priv->drm,
			"unexpected fall-through in %s\n", __func__);
		dev_priv->display.funcs.wm = &nop_funcs;
	}
}

void intel_pm_setup(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.suspended = false;
	atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);
}