disp.c
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"

#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <linux/iopoll.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include <nvif/push507c.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/event.h>
#include <nvif/if0014.h>
#include <nvif/timer.h>

#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
#include <nvhw/class/cl887d.h>
#include <nvhw/class/cl907d.h>
#include <nvhw/class/cl917d.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"

#include <subdev/bios/dp.h>

/******************************************************************************
 * EVO channel
 *****************************************************************************/
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_ctor(disp, "kmsChan", 0,
						       oclass[0], data, size,
						       &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_dtor(&chan->user);
}

/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	nvif_object_dtor(&dmac->vram);
	nvif_object_dtor(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	nvif_mem_dtor(&dmac->_push.mem);
}
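
/* Flush buffered commands to the channel: compute how far the CPU has
 * written, make sure those writes have landed in VRAM when the push
 * buffer lives there (EVO fetches aren't coherent with BAR1), then
 * advance PUT so the hardware picks the new commands up.
 */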
static void
nv50_dmac_kick(struct nvif_push *push)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);

	dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
	if (dmac->put != dmac->cur) {
		/* Push buffer fetches are not coherent with BAR1, we need to ensure
		 * writes have been flushed right through to VRAM before writing PUT.
		 */
		if (dmac->push->mem.type & NVIF_MEM_VRAM) {
			struct nvif_device *device = dmac->base.device;
			nvif_wr32(&device->object, 0x070000, 0x00000001);
			nvif_msec(device, 2000,
				if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
					break;
			);
		}

		NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
		dmac->put = dmac->cur;
	}

	push->bgn = push->cur;
}
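
/* Dwords that can be written without catching up to the hardware's GET
 * pointer: stay 5 dwords behind GET (matching NVIDIA), otherwise run to
 * the end of the push buffer.
 */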
static int
nv50_dmac_free(struct nv50_dmac *dmac)
{
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get > dmac->cur) /* NVIDIA stay 5 away from GET, do the same. */
		return get - dmac->cur - 5;
	return dmac->max - dmac->cur;
}

static int
nv50_dmac_wind(struct nv50_dmac *dmac)
{
	/* Wait for GET to depart from the beginning of the push buffer to
	 * prevent writing PUT == GET, which would be ignored by HW.
	 */
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get == 0) {
		/* Corner-case, HW idle, but non-committed work pending. */
		if (dmac->put == 0)
			nv50_dmac_kick(dmac->push);

		if (nvif_msec(dmac->base.device, 2000,
			if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
	dmac->cur = 0;
	return 0;
}
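
/* Ensure at least 'size' dwords of push buffer space: wrap back to the
 * start via nv50_dmac_wind() if the request would run off the end, then
 * wait (up to two seconds) for the hardware to free enough space.
 */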
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
	int free;

	if (WARN_ON(size > dmac->max))
		return -EINVAL;

	dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
	if (dmac->cur + size >= dmac->max) {
		int ret = nv50_dmac_wind(dmac);
		if (ret)
			return ret;

		push->cur = dmac->_push.mem.object.map.ptr;
		push->cur = push->cur + dmac->cur;
		nv50_dmac_kick(push);
	}

	if (nvif_msec(dmac->base.device, 2000,
		if ((free = nv50_dmac_free(dmac)) >= size)
			break;
	) < 0) {
		WARN_ON(1);
		return -ETIMEDOUT;
	}

	push->bgn = dmac->_push.mem.object.map.ptr;
	push->bgn = push->bgn + dmac->cur;
	push->cur = push->bgn;
	push->end = push->cur + free;
	return 0;
}

MODULE_PARM_DESC(kms_vram_pushbuf, "Place EVO/NVD push buffers in VRAM (default: auto)");
static int nv50_dmac_vram_pushbuf = -1;
module_param_named(kms_vram_pushbuf, nv50_dmac_vram_pushbuf, int, 0400);
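
/* Create a display DMA channel: allocate the 4KiB push buffer (in VRAM on
 * Pascal, or when forced via kms_vram_pushbuf), hook up the wait/kick
 * callbacks, create the channel object, and, when a syncbuf offset is
 * given, the sync/VRAM DMA context objects referenced by EVO methods.
 */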
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nvif_disp_chan_v0 *args = data;
	u8 type = NVIF_MEM_COHERENT;
	int ret;

	mutex_init(&dmac->lock);

	/* Pascal added support for 47-bit physical addresses, but some
	 * parts of EVO still only accept 40-bit PAs.
	 *
	 * To avoid issues on systems with large amounts of RAM, and on
	 * systems where an IOMMU maps pages at a high address, we need
	 * to allocate push buffers in VRAM instead.
	 *
	 * This appears to match NVIDIA's behaviour on Pascal.
	 */
	if ((nv50_dmac_vram_pushbuf > 0) ||
	    (nv50_dmac_vram_pushbuf < 0 && device->info.family == NV_DEVICE_INFO_V0_PASCAL))
		type |= NVIF_MEM_VRAM;

	ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
				&dmac->_push.mem);
	if (ret)
		return ret;

	dmac->ptr = dmac->_push.mem.object.map.ptr;
	dmac->_push.wait = nv50_dmac_wait;
	dmac->_push.kick = nv50_dmac_kick;
	dmac->push = &dmac->_push;
	dmac->push->bgn = dmac->_push.mem.object.map.ptr;
	dmac->push->cur = dmac->push->bgn;
	dmac->push->end = dmac->push->bgn;
	dmac->max = 0x1000/4 - 1;

	/* EVO channels are affected by a HW bug where the last 12 DWORDs
	 * of the push buffer aren't able to be used safely.
	 */
	if (disp->oclass < GV100_DISP)
		dmac->max -= 12;

	args->pushbuf = nvif_handle(&dmac->_push.mem.object);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	if (ret)
		return ret;

	if (syncbuf < 0)
		return 0;

	ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}

/******************************************************************************
 * Output path helpers
 *****************************************************************************/
static void
nv50_outp_dump_caps(struct nouveau_drm *drm,
		    struct nouveau_encoder *outp)
{
	NV_DEBUG(drm, "%s caps: dp_interlace=%d\n",
		 outp->base.base.name, outp->caps.dp_interlace);
}

static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct {
		struct nv50_disp_mthd_v1 base;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_RELEASE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	nv_encoder->or = -1;
	nv_encoder->link = 0;
}

static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = nv_encoder->dcb->hashm,
		.info.hda = hda,
	};
	int ret;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}

static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Don't force scaler for EDID modes with
			 * same size as the native one (e.g. different
			 * refresh rate)
			 */
			if (mode->hdisplay == native_mode->hdisplay &&
			    mode->vdisplay == native_mode->vdisplay &&
			    mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}
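
/* For DP, step the head's bpc down (in steps of 2, to a floor of 6) until
 * the mode's data rate fits within the configured link rate and lane
 * count.
 */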
static void
nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
{
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	unsigned int max_rate, mode_rate;

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_DP:
		max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;

		/* we don't support more than 10 anyway */
		asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);

		/* reduce the bpc until it works out */
		while (asyh->or.bpc > 6) {
			mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
			if (mode_rate <= max_rate)
				break;
			asyh->or.bpc -= 2;
		}
		break;
	default:
		break;
	}
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  nv_connector->native_mode);
	if (ret)
		return ret;

	if (crtc_state->mode_changed || crtc_state->connectors_changed)
		asyh->or.bpc = connector->display_info.bpc;

	/* We might have to reduce the bpc */
	nv50_outp_atomic_fix_depth(encoder, crtc_state);
	return 0;
}

struct nouveau_connector *
nv50_outp_get_new_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

struct nouveau_connector *
nv50_outp_get_old_connector(struct drm_atomic_state *state, struct nouveau_encoder *outp)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_old_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

static struct nouveau_crtc *
nv50_outp_get_new_crtc(const struct drm_atomic_state *state, const struct nouveau_encoder *outp)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	const u32 mask = drm_encoder_mask(&outp->base.base);
	int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->encoder_mask & mask)
			return nouveau_crtc(crtc);
	}

	return NULL;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);

	core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
	case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
	case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
	default:
		WARN_ON(1);
		break;
	}

	ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);

	nv50_outp_acquire(nv_encoder, false);

	core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
	asyh->or.depth = 0;

	nv_encoder->crtc = &nv_crtc->base;
}

static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	u32 loadval;
	int ret;

	loadval = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (loadval == 0)
		loadval = 340;

	ret = nvif_outp_load_detect(&nv_encoder->outp, loadval);
	if (ret <= 0)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_dac_atomic_enable,
	.atomic_disable = nv50_dac_atomic_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nv50_disp *disp = nv50_disp(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_connector_attach_encoder(connector, encoder);

	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/*
 * audio component binding for ELD notification
 */
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
				int dev_id)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 port, dev_id);
}
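
/* Audio component .get_eld() hook: find the non-MST encoder currently
 * driving this (port, dev_id) pair and, if audio is enabled on it, copy
 * out the connector's ELD; returns the ELD size, or 0 when inactive.
 */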
static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
			     bool *enabled, unsigned char *buf, int max_bytes)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_encoder *encoder;
	struct nouveau_encoder *nv_encoder;
	struct nouveau_crtc *nv_crtc;
	int ret = 0;

	*enabled = false;

	mutex_lock(&drm->audio.lock);

	drm_for_each_encoder(encoder, drm->dev) {
		struct nouveau_connector *nv_connector = NULL;

		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
			continue; /* TODO */

		nv_encoder = nouveau_encoder(encoder);
		nv_connector = nouveau_connector(nv_encoder->audio.connector);
		nv_crtc = nouveau_crtc(nv_encoder->crtc);

		if (!nv_crtc || nv_encoder->or != port || nv_crtc->index != dev_id)
			continue;

		*enabled = nv_encoder->audio.enabled;
		if (*enabled) {
			ret = drm_eld_size(nv_connector->base.eld);
			memcpy(buf, nv_connector->base.eld,
			       min(max_bytes, ret));
		}
		break;
	}

	mutex_unlock(&drm->audio.lock);

	return ret;
}

static const struct drm_audio_component_ops nv50_audio_component_ops = {
	.get_eld = nv50_audio_component_get_eld,
};

static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
			  void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
		return -ENOMEM;

	drm_modeset_lock_all(drm_dev);
	acomp->ops = &nv50_audio_component_ops;
	acomp->dev = kdev;
	drm->audio.component = acomp;
	drm_modeset_unlock_all(drm_dev);
	return 0;
}

static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
			    void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	drm_modeset_lock_all(drm_dev);
	drm->audio.component = NULL;
	acomp->ops = NULL;
	acomp->dev = NULL;
	drm_modeset_unlock_all(drm_dev);
}

static const struct component_ops nv50_audio_component_bind_ops = {
	.bind   = nv50_audio_component_bind,
	.unbind = nv50_audio_component_unbind,
};

static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
	if (component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
		return;

	drm->audio.component_registered = true;
	mutex_init(&drm->audio.lock);
}

static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
	if (!drm->audio.component_registered)
		return;

	component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
	drm->audio.component_registered = false;
	mutex_destroy(&drm->audio.lock);
}

/******************************************************************************
 * Audio
 *****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht   = nv_encoder->dcb->hasht,
		.base.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				(0x0100 << nv_crtc->index),
	};

	mutex_lock(&drm->audio.lock);
	if (nv_encoder->audio.enabled) {
		nv_encoder->audio.enabled = false;
		nv_encoder->audio.connector = NULL;
		nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	}
	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
					nv_crtc->index);
}

static void
nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		  struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		  struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht   = nv_encoder->dcb->hasht,
		.base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				     (0x0100 << nv_crtc->index),
	};

	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	mutex_lock(&drm->audio.lock);

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	nvif_mthd(&disp->disp->object, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
	nv_encoder->audio.enabled = true;
	nv_encoder->audio.connector = &nv_connector->base;

	mutex_unlock(&drm->audio.lock);

	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
					nv_crtc->index);
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
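
/* Power up HDMI on the SOR: pack the AVI and vendor infoframes for the
 * mode, derive max_ac_packet from the horizontal blanking, point the
 * method at the right head, then (for SCDC-capable sinks) program the
 * TMDS clock ratio and scrambling to match what was just enabled.
 */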
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
		 struct nouveau_connector *nv_connector, struct drm_atomic_state *state,
		 struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct drm_hdmi_info *hdmi;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	bool high_tmds_clock_ratio = false, scrambling = false;
	u8 config;
	int ret;
	int size;

	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	hdmi = &nv_connector->base.display_info.hdmi;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
						       &nv_connector->base, mode);
	if (!ret) {
		drm_hdmi_avi_infoframe_quant_range(&avi_frame.avi,
						   &nv_connector->base, mode,
						   HDMI_QUANTIZATION_RANGE_FULL);
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	if (hdmi->scdc.scrambling.supported) {
		high_tmds_clock_ratio = mode->clock > 340000;
		scrambling = high_tmds_clock_ratio ||
			hdmi->scdc.scrambling.low_rates;
	}

	args.pwr.scdc =
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE * scrambling |
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 * high_tmds_clock_ratio;

	size = sizeof(args.base)
		+ sizeof(args.pwr)
		+ args.pwr.avi_infoframe_length
		+ args.pwr.vendor_infoframe_length;
	nvif_mthd(&disp->disp->object, 0, &args, size);

	nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);

	/* If SCDC is supported by the downstream monitor, update
	 * divider / scrambling settings to what we programmed above.
	 */
	if (!hdmi->scdc.scrambling.supported)
		return;

	ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config);
	if (ret < 0) {
		NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
		return;
	}
	config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
	config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 * high_tmds_clock_ratio;
	config |= SCDC_SCRAMBLING_ENABLE * scrambling;
	ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config);
	if (ret < 0)
		NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
			 config, ret);
}

/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;
};

struct nv50_msto {
	struct drm_encoder encoder;

	/* head is statically assigned on msto creation */
	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
	bool enabled;
};
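
/* Resolve an encoder to its real output path: MST fake encoders map back
 * to the SOR behind their topology (NULL until a connector is assigned);
 * anything else already is a nouveau_encoder.
 */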
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
{
	struct nv50_msto *msto;

	if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
		return nouveau_encoder(encoder);

	msto = nv50_msto(encoder);
	if (!msto->mstc)
		return NULL;
	return msto->mstc->mstm->outp;
}

static void
nv50_msto_cleanup(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);

	if (msto->disabled) {
		msto->mstc = NULL;
		msto->disabled = false;
	} else if (msto->enabled) {
		drm_dp_add_payload_part2(mgr, state, payload);
		msto->enabled = false;
	}
}
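
/* Program this msto's VCPI (start slot, slot count, PBN) into the
 * hardware, either removing the payload for a disabled head or adding
 * part 1 of the payload for a newly-enabled one.
 */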
static void
nv50_msto_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct drm_dp_mst_atomic_payload *payload;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht  = mstm->outp->dcb->hasht,
		.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			       (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);

	payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);

	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
	if (msto->disabled) {
		drm_dp_remove_payload(mgr, mst_state, payload, payload);
	} else {
		if (msto->enabled)
			drm_dp_add_payload_part1(mgr, mst_state, payload);

		args.vcpi.start_slot = payload->vc_start_slot;
		args.vcpi.num_slots = payload->time_slots;
		args.vcpi.pbn = payload->pbn;
		args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div;
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);

	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
}
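
/* Validate the mode for the head and, unless restoring a duplicated
 * state, recompute the payload's PBN from the adjusted mode clock and
 * bpc before reserving time slots on the topology.
 */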
  888. static int
  889. nv50_msto_atomic_check(struct drm_encoder *encoder,
  890. struct drm_crtc_state *crtc_state,
  891. struct drm_connector_state *conn_state)
  892. {
  893. struct drm_atomic_state *state = crtc_state->state;
  894. struct drm_connector *connector = conn_state->connector;
  895. struct drm_dp_mst_topology_state *mst_state;
  896. struct nv50_mstc *mstc = nv50_mstc(connector);
  897. struct nv50_mstm *mstm = mstc->mstm;
  898. struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
  899. int slots;
  900. int ret;
  901. ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
  902. mstc->native);
  903. if (ret)
  904. return ret;
  905. if (!drm_atomic_crtc_needs_modeset(crtc_state))
  906. return 0;
  907. /*
  908. * When restoring duplicated states, we need to make sure that the bw
  909. * remains the same and avoid recalculating it, as the connector's bpc
  910. * may have changed after the state was duplicated
  911. */
  912. if (!state->duplicated) {
  913. const int clock = crtc_state->adjusted_mode.clock;
  914. asyh->or.bpc = connector->display_info.bpc;
  915. asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
  916. false);
  917. }
  918. mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr);
  919. if (IS_ERR(mst_state))
  920. return PTR_ERR(mst_state);
  921. if (!mst_state->pbn_div) {
  922. struct nouveau_encoder *outp = mstc->mstm->outp;
  923. mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
  924. outp->dp.link_bw, outp->dp.link_nr);
  925. }
  926. slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);
  927. if (slots < 0)
  928. return slots;
  929. asyh->dp.tu = slots;
  930. return 0;
  931. }
  932. static u8
  933. nv50_dp_bpc_to_depth(unsigned int bpc)
  934. {
  935. switch (bpc) {
  936. case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
  937. case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
  938. case 10:
  939. default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
  940. }
  941. }
  942. static void
  943. nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
  944. {
  945. struct nv50_msto *msto = nv50_msto(encoder);
  946. struct nv50_head *head = msto->head;
  947. struct nv50_head_atom *asyh =
  948. nv50_head_atom(drm_atomic_get_new_crtc_state(state, &head->base.base));
  949. struct nv50_mstc *mstc = NULL;
  950. struct nv50_mstm *mstm = NULL;
  951. struct drm_connector *connector;
  952. struct drm_connector_list_iter conn_iter;
  953. u8 proto;
  954. drm_connector_list_iter_begin(encoder->dev, &conn_iter);
  955. drm_for_each_connector_iter(connector, &conn_iter) {
  956. if (connector->state->best_encoder == &msto->encoder) {
  957. mstc = nv50_mstc(connector);
  958. mstm = mstc->mstm;
  959. break;
  960. }
  961. }
  962. drm_connector_list_iter_end(&conn_iter);
  963. if (WARN_ON(!mstc))
  964. return;
  965. if (!mstm->links++)
  966. nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
  967. if (mstm->outp->link & 1)
  968. proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
  969. else
  970. proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;
  971. mstm->outp->update(mstm->outp, head->base.index, asyh, proto,
  972. nv50_dp_bpc_to_depth(asyh->or.bpc));
  973. msto->mstc = mstc;
  974. msto->enabled = true;
  975. mstm->modified = true;
  976. }
  977. static void
  978. nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
  979. {
  980. struct nv50_msto *msto = nv50_msto(encoder);
  981. struct nv50_mstc *mstc = msto->mstc;
  982. struct nv50_mstm *mstm = mstc->mstm;
  983. mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
  984. mstm->modified = true;
  985. if (!--mstm->links)
  986. mstm->disabled = true;
  987. msto->disabled = true;
  988. }
  989. static const struct drm_encoder_helper_funcs
  990. nv50_msto_help = {
  991. .atomic_disable = nv50_msto_atomic_disable,
  992. .atomic_enable = nv50_msto_atomic_enable,
  993. .atomic_check = nv50_msto_atomic_check,
  994. };
  995. static void
  996. nv50_msto_destroy(struct drm_encoder *encoder)
  997. {
  998. struct nv50_msto *msto = nv50_msto(encoder);
  999. drm_encoder_cleanup(&msto->encoder);
  1000. kfree(msto);
  1001. }
  1002. static const struct drm_encoder_funcs
  1003. nv50_msto = {
  1004. .destroy = nv50_msto_destroy,
  1005. };
  1006. static struct nv50_msto *
  1007. nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
  1008. {
  1009. struct nv50_msto *msto;
  1010. int ret;
  1011. msto = kzalloc(sizeof(*msto), GFP_KERNEL);
  1012. if (!msto)
  1013. return ERR_PTR(-ENOMEM);
  1014. ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
  1015. DRM_MODE_ENCODER_DPMST, "mst-%d", id);
  1016. if (ret) {
  1017. kfree(msto);
  1018. return ERR_PTR(ret);
  1019. }
  1020. drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
  1021. msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
  1022. msto->head = head;
  1023. return msto;
  1024. }
  1025. static struct drm_encoder *
  1026. nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
  1027. struct drm_atomic_state *state)
  1028. {
  1029. struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
  1030. connector);
  1031. struct nv50_mstc *mstc = nv50_mstc(connector);
  1032. struct drm_crtc *crtc = connector_state->crtc;
  1033. if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
  1034. return NULL;
  1035. return &nv50_head(crtc)->msto->encoder;
  1036. }
  1037. static enum drm_mode_status
  1038. nv50_mstc_mode_valid(struct drm_connector *connector,
  1039. struct drm_display_mode *mode)
  1040. {
  1041. struct nv50_mstc *mstc = nv50_mstc(connector);
  1042. struct nouveau_encoder *outp = mstc->mstm->outp;
  1043. /* TODO: calculate the PBN from the dotclock and validate against the
  1044. * MSTB's max possible PBN
  1045. */
  1046. return nv50_dp_mode_valid(connector, outp, mode, NULL);
  1047. }
  1048. static int
  1049. nv50_mstc_get_modes(struct drm_connector *connector)
  1050. {
  1051. struct nv50_mstc *mstc = nv50_mstc(connector);
  1052. int ret = 0;
  1053. mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
  1054. drm_connector_update_edid_property(&mstc->connector, mstc->edid);
  1055. if (mstc->edid)
  1056. ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
  1057. /*
  1058. * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
  1059. * to 8 to save bandwidth on the topology. In the future, we'll want
  1060. * to properly fix this by dynamically selecting the highest possible
  1061. * bpc that would fit in the topology
  1062. */
  1063. if (connector->display_info.bpc)
  1064. connector->display_info.bpc =
  1065. clamp(connector->display_info.bpc, 6U, 8U);
  1066. else
  1067. connector->display_info.bpc = 8;
  1068. if (mstc->native)
  1069. drm_mode_destroy(mstc->connector.dev, mstc->native);
  1070. mstc->native = nouveau_conn_native_mode(&mstc->connector);
  1071. return ret;
  1072. }
  1073. static int
  1074. nv50_mstc_atomic_check(struct drm_connector *connector,
  1075. struct drm_atomic_state *state)
  1076. {
  1077. struct nv50_mstc *mstc = nv50_mstc(connector);
  1078. struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
  1079. return drm_dp_atomic_release_time_slots(state, mgr, mstc->port);
  1080. }
  1081. static int
  1082. nv50_mstc_detect(struct drm_connector *connector,
  1083. struct drm_modeset_acquire_ctx *ctx, bool force)
  1084. {
  1085. struct nv50_mstc *mstc = nv50_mstc(connector);
  1086. int ret;
  1087. if (drm_connector_is_unregistered(connector))
  1088. return connector_status_disconnected;
  1089. ret = pm_runtime_get_sync(connector->dev->dev);
  1090. if (ret < 0 && ret != -EACCES) {
  1091. pm_runtime_put_autosuspend(connector->dev->dev);
  1092. return connector_status_disconnected;
  1093. }
  1094. ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
  1095. mstc->port);
  1096. if (ret != connector_status_connected)
  1097. goto out;
  1098. out:
  1099. pm_runtime_mark_last_busy(connector->dev->dev);
  1100. pm_runtime_put_autosuspend(connector->dev->dev);
  1101. return ret;
  1102. }
  1103. static const struct drm_connector_helper_funcs
  1104. nv50_mstc_help = {
  1105. .get_modes = nv50_mstc_get_modes,
  1106. .mode_valid = nv50_mstc_mode_valid,
  1107. .atomic_best_encoder = nv50_mstc_atomic_best_encoder,
  1108. .atomic_check = nv50_mstc_atomic_check,
  1109. .detect_ctx = nv50_mstc_detect,
  1110. };
  1111. static void
  1112. nv50_mstc_destroy(struct drm_connector *connector)
  1113. {
  1114. struct nv50_mstc *mstc = nv50_mstc(connector);
  1115. drm_connector_cleanup(&mstc->connector);
  1116. drm_dp_mst_put_port_malloc(mstc->port);
  1117. kfree(mstc);
  1118. }
  1119. static const struct drm_connector_funcs
  1120. nv50_mstc = {
  1121. .reset = nouveau_conn_reset,
  1122. .fill_modes = drm_helper_probe_single_connector_modes,
  1123. .destroy = nv50_mstc_destroy,
  1124. .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
  1125. .atomic_destroy_state = nouveau_conn_atomic_destroy_state,
  1126. .atomic_set_property = nouveau_conn_atomic_set_property,
  1127. .atomic_get_property = nouveau_conn_atomic_get_property,
  1128. };
  1129. static int
  1130. nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
  1131. const char *path, struct nv50_mstc **pmstc)
  1132. {
  1133. struct drm_device *dev = mstm->outp->base.base.dev;
  1134. struct drm_crtc *crtc;
  1135. struct nv50_mstc *mstc;
  1136. int ret;
  1137. if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
  1138. return -ENOMEM;
  1139. mstc->mstm = mstm;
  1140. mstc->port = port;
  1141. ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
  1142. DRM_MODE_CONNECTOR_DisplayPort);
  1143. if (ret) {
  1144. kfree(*pmstc);
  1145. *pmstc = NULL;
  1146. return ret;
  1147. }
  1148. drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
  1149. mstc->connector.funcs->reset(&mstc->connector);
  1150. nouveau_conn_attach_properties(&mstc->connector);
  1151. drm_for_each_crtc(crtc, dev) {
  1152. if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
  1153. continue;
  1154. drm_connector_attach_encoder(&mstc->connector,
  1155. &nv50_head(crtc)->msto->encoder);
  1156. }
  1157. drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
  1158. drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
  1159. drm_connector_set_path_property(&mstc->connector, path);
  1160. drm_dp_mst_get_port_malloc(port);
  1161. return 0;
  1162. }
  1163. static void
  1164. nv50_mstm_cleanup(struct drm_atomic_state *state,
  1165. struct drm_dp_mst_topology_state *mst_state,
  1166. struct nv50_mstm *mstm)
  1167. {
  1168. struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
  1169. struct drm_encoder *encoder;
  1170. NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
  1171. drm_dp_check_act_status(&mstm->mgr);
  1172. drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
  1173. if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
  1174. struct nv50_msto *msto = nv50_msto(encoder);
  1175. struct nv50_mstc *mstc = msto->mstc;
  1176. if (mstc && mstc->mstm == mstm)
  1177. nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto);
  1178. }
  1179. }
  1180. mstm->modified = false;
  1181. }
static void
nv50_mstm_prepare(struct drm_atomic_state *state,
		  struct drm_dp_mst_topology_state *mst_state,
		  struct nv50_mstm *mstm)
{
	struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
	struct drm_encoder *encoder;

	NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);

	/* Disable payloads first. */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;

			if (mstc && mstc->mstm == mstm && msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}

	/* Add payloads for new heads, while also updating the start slots of
	 * any unmodified (but active) heads that may have had their VC slots
	 * shifted left after the previous step.
	 */
	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;

			if (mstc && mstc->mstm == mstm && !msto->disabled)
				nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
		}
	}

	if (mstm->disabled) {
		if (!mstm->links)
			nv50_outp_release(mstm->outp);
		mstm->disabled = false;
	}
}

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;
	int ret;

	ret = nv50_mstc_new(mstm, port, path, &mstc);
	if (ret)
		return NULL;

	return &mstc->connector;
}

static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
	.add_connector = nv50_mstm_add_connector,
};

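/* Service an MST HPD IRQ: read the 8-byte ESI block starting at
 * DP_SINK_COUNT_ESI, hand it to the topology manager to parse, ack the
 * serviced events by writing DEVICE_SERVICE_IRQ_VECTOR_ESI0 back, and loop
 * until nothing is left pending.  Returns false if the AUX transactions
 * themselves failed.
 */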
bool
nv50_mstm_service(struct nouveau_drm *drm,
		  struct nouveau_connector *nv_connector,
		  struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = &nv_connector->aux;
	bool handled = true, ret = true;
	int rc;
	u8 esi[8] = {};

	while (handled) {
		u8 ack[8] = {};

		rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
		if (rc != 8) {
			ret = false;
			break;
		}

		drm_dp_mst_hpd_irq_handle_event(&mstm->mgr, esi, ack, &handled);
		if (!handled)
			break;

		rc = drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, ack[1]);
		if (rc != 1) {
			ret = false;
			break;
		}

		drm_dp_mst_hpd_irq_send_new_request(&mstm->mgr);
	}

	if (!ret)
		NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
			 nv_connector->base.name, rc);

	return ret;
}

void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
	mstm->is_mst = false;
	drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}

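/* Ask NVKM to switch the SOR into (or out of) MST mode via the
 * NV50_DISP_MTHD_V1_SOR_DP_MST_LINK display method.
 */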
static int
nv50_mstm_enable(struct nv50_mstm *mstm, int state)
{
	struct nouveau_encoder *outp = mstm->outp;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_link_v0 mst;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
		.base.hasht = outp->dcb->hasht,
		.base.hashm = outp->dcb->hashm,
		.mst.state = state,
	};
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nvif_object *disp = &drm->display->disp.object;

	return nvif_mthd(disp, 0, &args, sizeof(args));
}

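/* Probe for, and enable, MST on a DP encoder.  Returns 1 if MST was
 * enabled, 0 if the encoder can't do MST, and a negative error code on
 * failure.
 */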
int
nv50_mstm_detect(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	struct drm_dp_aux *aux;
	int ret;

	if (!mstm || !mstm->can_mst)
		return 0;

	aux = mstm->mgr.aux;

	/* Clear any leftover MST state we didn't set ourselves by first
	 * disabling MST if it was already enabled.
	 */
	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
	if (ret < 0)
		return ret;

	/* And start enabling. */
	ret = nv50_mstm_enable(mstm, true);
	if (ret)
		return ret;

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
	if (ret) {
		nv50_mstm_enable(mstm, false);
		return ret;
	}

	mstm->is_mst = true;
	return 1;
}

static void
nv50_mstm_fini(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;

	if (!mstm)
		return;

	/* Don't change the MST state of this connector until we've finished
	 * resuming, since we can't safely grab hpd_irq_lock in our resume
	 * path to protect mstm->is_mst without potentially deadlocking.
	 */
	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = true;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (mstm->is_mst)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

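/* Resume-time counterpart to nv50_mstm_fini().  If the MST topology can't
 * be resumed (the helper returns -1), tear MST down and send a hotplug
 * event so userspace reprobes.
 */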
static void
nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	int ret = 0;

	if (!mstm)
		return;

	if (mstm->is_mst) {
		ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
		if (ret == -1)
			nv50_mstm_remove(mstm);
	}

	mutex_lock(&outp->dp.hpd_irq_lock);
	mstm->suspended = false;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	if (ret == -1)
		drm_kms_helper_hotplug_event(mstm->mgr.dev);
}

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;

	if (mstm) {
		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
		kfree(*pmstm);
		*pmstm = NULL;
	}
}

static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
	      int conn_base_id, struct nv50_mstm **pmstm)
{
	const int max_payloads = hweight8(outp->dcb->heads);
	struct drm_device *dev = outp->base.base.dev;
	struct nv50_mstm *mstm;
	int ret;

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
		struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct nv50_core *core = disp->core;

	if (!asyh) {
		nv_encoder->ctrl &= ~BIT(head);
		if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
			nv_encoder->ctrl = 0;
	} else {
		nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
		nv_encoder->ctrl |= BIT(head);
		asyh->or.depth = depth;
	}

	core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
}

/* TODO: Should we extend this to PWM-only backlights?
 * Also: should we add a DRM helper for waiting until the sink acknowledges
 * that the panel backlight has been shut off?  Intel doesn't seem to do
 * this, and instead uses a fixed time delay from the VBIOS…
 */
static void
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
	struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nouveau_backlight *backlight = nv_connector->backlight;
#endif
	struct drm_dp_aux *aux = &nv_connector->aux;
	int ret;
	u8 pwr;

#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	if (backlight && backlight->uses_dpcd) {
		ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
		if (ret < 0)
			NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
				 nv_connector->base.base.id, nv_connector->base.name, ret);
	}
#endif

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		/* Put the sink into D3 before shutting the link down.  Note
		 * that drm_dp_dpcd_readb() returns the number of bytes
		 * transferred (1) on success.
		 */
		ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
		if (ret == 1) {
			pwr &= ~DP_SET_POWER_MASK;
			pwr |= DP_SET_POWER_D3;
			drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
		}
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
	nv50_audio_disable(encoder, nv_crtc);
	nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
	nv50_outp_release(nv_encoder);
	nv_encoder->crtc = NULL;
}

static void
nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_lvds_script_v0 lvds;
	} lvds = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_connector *nv_connector;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
	struct nouveau_backlight *backlight;
#endif
	struct nvbios *bios = &drm->vbios;
	bool hda = false;
	u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
	u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;

	nv_connector = nv50_outp_get_new_connector(state, nv_encoder);
	nv_encoder->crtc = &nv_crtc->base;

	if ((disp->disp->object.oclass == GT214_DISP ||
	     disp->disp->object.oclass >= GF110_DISP) &&
	    drm_detect_monitor_audio(nv_connector->edid))
		hda = true;
	nv50_outp_acquire(nv_encoder, hda);

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		if (nv_encoder->link & 1) {
			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
			/* Only enable dual-link if:
			 * - we need to (i.e. the pixel clock is >= 165 MHz)
			 * - the DCB says we can
			 * - the sink is not an HDMI monitor, since there's
			 *   no dual-link on HDMI
			 */
			if (mode->clock >= 165000 &&
			    nv_encoder->dcb->duallink_possible &&
			    !drm_detect_hdmi_monitor(nv_connector->edid))
				proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
		} else {
			proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
		}

		nv50_hdmi_enable(&nv_encoder->base.base, nv_crtc, nv_connector, state, mode);
		break;
	case DCB_OUTPUT_LVDS:
		proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;

		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				lvds.lvds.script |= 0x0100;
			if (bios->fp.if_is_24bit)
				lvds.lvds.script |= 0x0200;
		} else {
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					lvds.lvds.script |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				lvds.lvds.script |= 0x0100;
			}

			if (lvds.lvds.script & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					lvds.lvds.script |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					lvds.lvds.script |= 0x0200;
			}

			if (asyh->or.bpc == 8)
				lvds.lvds.script |= 0x0200;
		}

		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
		break;
	case DCB_OUTPUT_DP:
		depth = nv50_dp_bpc_to_depth(asyh->or.bpc);

		if (nv_encoder->link & 1)
			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
		else
			proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;

		nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);

#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
		backlight = nv_connector->backlight;
		if (backlight && backlight->uses_dpcd)
			drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
						 (u16)backlight->dev->props.brightness);
#endif

		break;
	default:
		BUG();
		break;
	}

	nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}

static const struct drm_encoder_helper_funcs
nv50_sor_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_sor_atomic_enable,
	.atomic_disable = nv50_sor_atomic_disable,
};

static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	nv50_mstm_del(&nv_encoder->dp.mstm);
	drm_encoder_cleanup(encoder);

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
		mutex_destroy(&nv_encoder->dp.hpd_irq_lock);

	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_sor_func = {
	.destroy = nv50_sor_destroy,
};

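/* MST support is advertised by the VBIOS: a DP table with version 0x40 or
 * later, and bit 2 set in the byte at offset 0x08 of that table.
 */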
bool nv50_has_mst(struct nouveau_drm *drm)
{
	struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
	u32 data;
	u8 ver, hdr, cnt, len;

	data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
	return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
}

static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	struct nv50_disp *disp = nv50_disp(connector->dev);
	int type, ret;

	switch (dcbe->type) {
	case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
	default:
		type = DRM_MODE_ENCODER_TMDS;
		break;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->update = nv50_sor_update;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
			 "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_sor_help);

	drm_connector_attach_encoder(connector, encoder);

	disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
	nv50_outp_dump_caps(drm, nv_encoder);

	if (dcbe->type == DCB_OUTPUT_DP) {
		struct nvkm_i2c_aux *aux =
			nvkm_i2c_aux_find(i2c, dcbe->i2c_index);

		mutex_init(&nv_encoder->dp.hpd_irq_lock);

		if (aux) {
			if (disp->disp->object.oclass < GF110_DISP) {
				/* HW has no support for address-only
				 * transactions, so we're required to
				 * use custom I2C-over-AUX code.
				 */
				nv_encoder->i2c = &aux->i2c;
			} else {
				nv_encoder->i2c = &nv_connector->aux.ddc;
			}
			nv_encoder->aux = aux;
		}

		if (nv_connector->type != DCB_CONNECTOR_eDP &&
		    nv50_has_mst(drm)) {
			ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
					    16, nv_connector->base.base.id,
					    &nv_encoder->dp.mstm);
			if (ret)
				return ret;
		}
	} else {
		struct nvkm_i2c_bus *bus =
			nvkm_i2c_bus_find(i2c, dcbe->i2c_index);

		if (bus)
			nv_encoder->i2c = &bus->i2c;
	}

	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/******************************************************************************
 * PIOR
 *****************************************************************************/
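/* PIORs drive external (off-chip) TMDS/DP encoders.  The adjusted mode's
 * clock is doubled in atomic_check below, which appears to be what the
 * hardware expects for these external links.
 */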
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);

	if (ret)
		return ret;
	crtc_state->adjusted_mode.clock *= 2;
	return 0;
}

static void
nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);

	core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nv50_outp_get_new_crtc(state, nv_encoder);
	struct nv50_head_atom *asyh =
		nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
	default:
		WARN_ON(1);
		break;
	}

	nv50_outp_acquire(nv_encoder, false);

	switch (asyh->or.bpc) {
	case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
	case  8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
	case  6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
	default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
	}

	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
	case DCB_OUTPUT_DP:
		ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
		break;
	default:
		BUG();
		break;
	}

	core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh);
	nv_encoder->crtc = &nv_crtc->base;
}

static const struct drm_encoder_helper_funcs
nv50_pior_help = {
	.atomic_check = nv50_pior_atomic_check,
	.atomic_enable = nv50_pior_atomic_enable,
	.atomic_disable = nv50_pior_atomic_disable,
};

static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	nvif_outp_dtor(&nv_encoder->outp);

	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_pior_func = {
	.destroy = nv50_pior_destroy,
};

static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus = NULL;
	struct nvkm_i2c_aux *aux = NULL;
	struct i2c_adapter *ddc;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type;

	switch (dcbe->type) {
	case DCB_OUTPUT_TMDS:
		bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
		ddc  = bus ? &bus->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	case DCB_OUTPUT_DP:
		aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
		ddc  = aux ? &aux->i2c : NULL;
		type = DRM_MODE_ENCODER_TMDS;
		break;
	default:
		return -ENODEV;
	}

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	nv_encoder->i2c = ddc;
	nv_encoder->aux = aux;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
			 "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_pior_help);
	drm_connector_attach_encoder(connector, encoder);

	disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
	nv50_outp_dump_caps(drm, nv_encoder);

	return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
}

/******************************************************************************
 * Atomic
 *****************************************************************************/
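/* Kick a core channel update and wait for it to complete, bracketed by the
 * MST payload prepare/cleanup work for any topology managers touched by
 * this state.  The core notifier is used to detect completion.
 */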
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	struct nouveau_drm *drm = nouveau_drm(state->dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_core *core = disp->core;
	struct nv50_mstm *mstm;
	int i;

	NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		mstm = nv50_mstm(mgr);
		if (mstm->modified)
			nv50_mstm_prepare(state, mst_state, mstm);
	}

	core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
	core->func->update(core, interlock, true);
	if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
				       disp->core->chan.base.device))
		NV_ERROR(drm, "core notifier timeout\n");

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		mstm = nv50_mstm(mgr);
		if (mstm->modified)
			nv50_mstm_cleanup(state, mst_state, mstm);
	}
}

static void
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
{
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw *wndw = nv50_wndw(plane);

		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
			if (wndw->func->update)
				wndw->func->update(wndw, interlock);
		}
	}
}

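/* The commit tail proper.  Ordering matters here: heads, planes and output
 * paths are torn down first (flushing immediately where an output requires
 * it), then brought back up in the reverse order, with interlocked
 * core/window channel updates pushed out at each stage before we finally
 * wait for the hardware and deliver any pending vblank events.
 */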
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct drm_crtc *crtc;
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_core *core = disp->core;
	struct nv50_outp_atom *outp, *outt;
	u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
	int i;
	bool flushed = false;

	NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
	nv50_crc_atomic_stop_reporting(state);
	drm_atomic_helper_wait_for_fences(dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);
	drm_dp_mst_atomic_wait_for_dependencies(state);
	drm_atomic_helper_update_legacy_modeset_state(dev, state);
	drm_atomic_helper_calc_timestamping_constants(state);

	if (atom->lock_core)
		mutex_lock(&disp->mutex);

	/* Disable head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
			  asyh->clr.mask, asyh->set.mask);

		if (old_crtc_state->active && !new_crtc_state->active) {
			pm_runtime_put_noidle(dev->dev);
			drm_crtc_vblank_off(crtc);
		}

		if (asyh->clr.mask) {
			nv50_head_flush_clr(head, asyh, atom->flush_disable);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
		}
	}

	/* Disable plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
			  asyw->clr.mask, asyw->set.mask);
		if (!asyw->clr.mask)
			continue;

		nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
	}

	/* Disable output path(s). */
	list_for_each_entry(outp, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
			  outp->clr.mask, outp->set.mask);

		if (outp->clr.mask) {
			help->atomic_disable(encoder, state);
			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
			if (outp->flush_disable) {
				nv50_disp_atomic_commit_wndw(state, interlock);
				nv50_disp_atomic_commit_core(state, interlock);
				memset(interlock, 0x00, sizeof(interlock));

				flushed = true;
			}
		}
	}

	/* Flush disable. */
	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (atom->flush_disable) {
			nv50_disp_atomic_commit_wndw(state, interlock);
			nv50_disp_atomic_commit_core(state, interlock);
			memset(interlock, 0x00, sizeof(interlock));

			flushed = true;
		}
	}

	if (flushed)
		nv50_crc_atomic_release_notifier_contexts(state);
	nv50_crc_atomic_init_notifier_contexts(state);

	/* Update output path(s). */
	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		const struct drm_encoder_helper_funcs *help;
		struct drm_encoder *encoder;

		encoder = outp->encoder;
		help = encoder->helper_private;

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
			  outp->set.mask, outp->clr.mask);

		if (outp->set.mask) {
			help->atomic_enable(encoder, state);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		list_del(&outp->head);
		kfree(outp);
	}

	/* Update head(s). */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}

		if (new_crtc_state->active) {
			if (!old_crtc_state->active) {
				drm_crtc_vblank_on(crtc);
				pm_runtime_get_noresume(dev->dev);
			}

			if (new_crtc_state->event)
				drm_crtc_vblank_get(crtc);
		}
	}

	/* Update window->head assignment.
	 *
	 * This has to happen in an update that's not interlocked with
	 * any window channels to avoid hitting HW error checks.
	 *
	 * TODO: Proper handling of window ownership (Turing apparently
	 * supports non-fixed mappings).
	 */
	if (core->assign_windows) {
		core->func->wndw.owner(core);
		nv50_disp_atomic_commit_core(state, interlock);
		core->assign_windows = false;
		interlock[NV50_DISP_INTERLOCK_CORE] = 0;
	}

	/* Finish updating head(s)...
	 *
	 * NVD is rather picky about both where window assignments can change,
	 * *and* about certain core and window channel states matching.
	 *
	 * The EFI GOP driver on newer GPUs configures window channels with a
	 * different output format to what we do, and the core channel update
	 * in the assign_windows case above would result in a state mismatch.
	 *
	 * Delay some of the head update until after that point to work around
	 * the issue.  This only affects the initial modeset.
	 *
	 * TODO: handle this better when adding flexible window mapping
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
		struct nv50_head *head = nv50_head(crtc);

		NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
			  asyh->set.mask, asyh->clr.mask);

		if (asyh->set.mask) {
			nv50_head_flush_set_wndw(head, asyh);
			interlock[NV50_DISP_INTERLOCK_CORE] = 1;
		}
	}

	/* Update plane(s). */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
			  asyw->set.mask, asyw->clr.mask);
		if ( !asyw->set.mask &&
		    (!asyw->clr.mask || atom->flush_disable))
			continue;

		nv50_wndw_flush_set(wndw, interlock, asyw);
	}

	/* Flush update. */
	nv50_disp_atomic_commit_wndw(state, interlock);

	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
		    interlock[NV50_DISP_INTERLOCK_OVLY] ||
		    interlock[NV50_DISP_INTERLOCK_WNDW] ||
		    !atom->state.legacy_cursor_update)
			nv50_disp_atomic_commit_core(state, interlock);
		else
			disp->core->func->update(disp->core, interlock, false);
	}

	if (atom->lock_core)
		mutex_unlock(&disp->mutex);

	/* Wait for HW to signal completion. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);
		int ret = nv50_wndw_wait_armed(wndw, asyw);

		if (ret)
			NV_ERROR(drm, "%s: timeout\n", plane->name);
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event) {
			unsigned long flags;

			/* Get correct count/ts if racing with vblank irq. */
			if (new_crtc_state->active)
				drm_crtc_accurate_vblank_count(crtc);

			spin_lock_irqsave(&crtc->dev->event_lock, flags);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

			new_crtc_state->event = NULL;
			if (new_crtc_state->active)
				drm_crtc_vblank_put(crtc);
		}
	}

	nv50_crc_atomic_start_reporting(state);
	if (!flushed)
		nv50_crc_atomic_release_notifier_contexts(state);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);
	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);

	/* Drop the RPM ref we got from nv50_disp_atomic_commit(). */
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, typeof(*state), commit_work);

	nv50_disp_atomic_commit_tail(state);
}

static int
nv50_disp_atomic_commit(struct drm_device *dev,
			struct drm_atomic_state *state, bool nonblock)
{
	struct drm_plane_state *new_plane_state;
	struct drm_plane *plane;
	int ret, i;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev->dev);
		return ret;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		goto done;

	INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		goto done;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err_cleanup;
	}

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err_cleanup;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
		struct nv50_wndw *wndw = nv50_wndw(plane);

		if (asyw->set.image)
			nv50_wndw_ntfy_enable(wndw, asyw);
	}

	drm_atomic_state_get(state);

	/*
	 * Grab another RPM ref for the commit tail, which will release the
	 * ref when it's finished.
	 */
	pm_runtime_get_noresume(dev->dev);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		nv50_disp_atomic_commit_tail(state);

err_cleanup:
	if (ret)
		drm_atomic_helper_cleanup_planes(dev, state);
done:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
	struct nv50_outp_atom *outp;

	list_for_each_entry(outp, &atom->outp, head) {
		if (outp->encoder == encoder)
			return outp;
	}

	outp = kzalloc(sizeof(*outp), GFP_KERNEL);
	if (!outp)
		return ERR_PTR(-ENOMEM);

	list_add(&outp->head, &atom->outp);
	outp->encoder = encoder;
	return outp;
}

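/* Record an output path that this state deactivates.  MST encoders
 * additionally force the disable to be flushed immediately, so that their
 * VC payloads are torn down before the rest of the update proceeds.
 */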
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
				struct drm_connector_state *old_connector_state)
{
	struct drm_encoder *encoder = old_connector_state->best_encoder;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = old_connector_state->crtc))
		return 0;

	old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			outp->flush_disable = true;
			atom->flush_disable = true;
		}
		outp->clr.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
				struct drm_connector_state *connector_state)
{
	struct drm_encoder *encoder = connector_state->best_encoder;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_outp_atom *outp;

	if (!(crtc = connector_state->crtc))
		return 0;

	new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
	if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		outp = nv50_disp_outp_atomic_add(atom, encoder);
		if (IS_ERR(outp))
			return PTR_ERR(outp);

		outp->set.ctrl = true;
		atom->lock_core = true;
	}

	return 0;
}

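/* Top-level atomic check: apply the static window->head mapping where the
 * hardware requires it, pull affected planes into the state when colour
 * management changes, run the core DRM helpers, and then build up the list
 * of output paths this commit will need to tear down or bring up.
 */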
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_core *core = nv50_disp(dev)->core;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct nv50_head *head;
	struct nv50_head_atom *asyh;
	int ret, i;

	if (core->assign_windows && core->func->head->static_wndw_map) {
		drm_for_each_crtc(crtc, dev) {
			new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
			if (IS_ERR(new_crtc_state))
				return PTR_ERR(new_crtc_state);

			head = nv50_head(crtc);
			asyh = nv50_head_atom(new_crtc_state);
			core->func->head->static_wndw_map(head, asyh);
		}
	}

	/* We need to handle colour management on a per-plane basis. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->color_mgmt_changed) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
		if (ret)
			return ret;

		ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
		if (ret)
			return ret;
	}

	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		return ret;

	nv50_crc_atomic_check_outp(atom);

	return 0;
}

static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);
	struct nv50_outp_atom *outp, *outt;

	list_for_each_entry_safe(outp, outt, &atom->outp, head) {
		list_del(&outp->head);
		kfree(outp);
	}

	drm_atomic_state_default_clear(state);
}

static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
	struct nv50_atom *atom = nv50_atom(state);

	drm_atomic_state_default_release(&atom->state);
	kfree(atom);
}

static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
	struct nv50_atom *atom;

	if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
	    drm_atomic_state_init(dev, &atom->state) < 0) {
		kfree(atom);
		return NULL;
	}

	INIT_LIST_HEAD(&atom->outp);
	return &atom->state;
}

static const struct drm_mode_config_funcs
nv50_disp_func = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = nouveau_fbcon_output_poll_changed,
	.atomic_check = nv50_disp_atomic_check,
	.atomic_commit = nv50_disp_atomic_commit,
	.atomic_state_alloc = nv50_disp_atomic_state_alloc,
	.atomic_state_clear = nv50_disp_atomic_state_clear,
	.atomic_state_free = nv50_disp_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs
nv50_disp_helper_func = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};

/******************************************************************************
 * Init
 *****************************************************************************/
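/* Suspend-time teardown: quiesce the MST state of every physical encoder
 * (MSTO encoders are virtual and have no nv50_mstm of their own), and make
 * sure no hotplug work is still in flight unless we're runtime-suspending.
 */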
static void
nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
			nv50_mstm_fini(nouveau_encoder(encoder));
	}

	if (!runtime)
		cancel_work_sync(&drm->hpd_work);
}

static int
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nv50_core *core = nv50_disp(dev)->core;
	struct drm_encoder *encoder;

	if (resume || runtime)
		core->func->init(core);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			struct nouveau_encoder *nv_encoder =
				nouveau_encoder(encoder);
			nv50_mstm_init(nv_encoder, runtime);
		}
	}

	return 0;
}

static void
nv50_display_destroy(struct drm_device *dev)
{
	struct nv50_disp *disp = nv50_disp(dev);

	nv50_audio_component_fini(nouveau_drm(dev));

	nvif_object_unmap(&disp->caps);
	nvif_object_dtor(&disp->caps);
	nv50_core_del(&disp->core);

	nouveau_bo_unmap(disp->sync);
	if (disp->sync)
		nouveau_bo_unpin(disp->sync);
	nouveau_bo_ref(NULL, &disp->sync);

	nouveau_display(dev)->priv = NULL;
	kfree(disp);
}

int
nv50_display_create(struct drm_device *dev)
{
	struct nvif_device *device = &nouveau_drm(dev)->client.device;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct nv50_disp *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;
	bool has_mst = nv50_has_mst(drm);

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	mutex_init(&disp->mutex);

	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nv50_display_destroy;
	nouveau_display(dev)->init = nv50_display_init;
	nouveau_display(dev)->fini = nv50_display_fini;
	disp->disp = &nouveau_display(dev)->disp;
	dev->mode_config.funcs = &nv50_disp_func;
	dev->mode_config.helper_private = &nv50_disp_helper_func;
	dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
	dev->mode_config.normalize_zpos = true;

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
			     NOUVEAU_GEM_DOMAIN_VRAM,
			     0, 0x0000, NULL, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
		if (!ret) {
			ret = nouveau_bo_map(disp->sync);
			if (ret)
				nouveau_bo_unpin(disp->sync);
		}
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* allocate master evo channel */
	ret = nv50_core_new(drm, &disp->core);
	if (ret)
		goto out;

	disp->core->func->init(disp->core);
	if (disp->core->func->caps_init) {
		ret = disp->core->func->caps_init(drm, disp);
		if (ret)
			goto out;
	}

	/* Assign the correct format modifiers */
	if (disp->disp->object.oclass >= TU102_DISP)
		nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
	else
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
	else
		nouveau_display(dev)->format_modifiers = disp50xx_modifiers;

	/* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell
	 * and later generations Kepler requires that we use small pages (4K) for
	 * cursor scanout surfaces.  The proper fix for this is to teach nouveau
	 * to migrate fbs being used for the cursor plane to small page
	 * allocations in prepare_fb().  When this is implemented, we should also
	 * force large pages (128K) for ovly fbs in order to fix Kepler ovlys.
	 * But until then, just limit cursors to 128x128 - which is small enough
	 * to avoid ever using large pages.
	 */
	if (disp->disp->object.oclass >= GM107_DISP) {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	} else if (disp->disp->object.oclass >= GK104_DISP) {
		dev->mode_config.cursor_width = 128;
		dev->mode_config.cursor_height = 128;
	} else {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	}

	/* create crtc objects to represent the hw heads */
	if (disp->disp->object.oclass >= GV100_DISP)
		crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
	else
	if (disp->disp->object.oclass >= GF110_DISP)
		crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
	else
		crtcs = 0x3;

	for (i = 0; i < fls(crtcs); i++) {
		struct nv50_head *head;

		if (!(crtcs & (1 << i)))
			continue;

		head = nv50_head_create(dev, i);
		if (IS_ERR(head)) {
			ret = PTR_ERR(head);
			goto out;
		}

		if (has_mst) {
			head->msto = nv50_msto_new(dev, head, i);
			if (IS_ERR(head->msto)) {
				ret = PTR_ERR(head->msto);
				head->msto = NULL;
				goto out;
			}

			/*
			 * FIXME: This is a hack to work around the following
			 * issues:
			 *
			 * https://gitlab.gnome.org/GNOME/mutter/issues/759
			 * https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
			 *
			 * Once these issues are closed, this should be
			 * removed.
			 */
			head->msto->encoder.possible_crtcs = crtcs;
		}
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location == DCB_LOC_ON_CHIP) {
			switch (dcbe->type) {
			case DCB_OUTPUT_TMDS:
			case DCB_OUTPUT_LVDS:
			case DCB_OUTPUT_DP:
				ret = nv50_sor_create(connector, dcbe);
				break;
			case DCB_OUTPUT_ANALOG:
				ret = nv50_dac_create(connector, dcbe);
				break;
			default:
				ret = -ENODEV;
				break;
			}
		} else {
			ret = nv50_pior_create(connector, dcbe);
		}

		if (ret) {
			NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
				dcbe->location, dcbe->type,
				ffs(dcbe->or) - 1, ret);
			ret = 0;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->possible_encoders)
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			connector->name);
		connector->funcs->destroy(connector);
	}

	/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
	dev->vblank_disable_immediate = true;

	nv50_audio_component_init(drm);

out:
	if (ret)
		nv50_display_destroy(dev);
	return ret;
}

/******************************************************************************
 * Format modifiers
 *****************************************************************************/

/****************************************************************
 * Log2(block height) ---------------------------------------+ *
 * Page Kind -------------------------------------------+    | *
 * Gob Height/Page Kind Generation -----------------+   |    | *
 * Sector layout --------------------------------+  |   |    | *
 * Compression -------------------------------+  |  |   |    | */
const u64 disp50xx_modifiers[] = { /*         |  |  |   |    | */
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

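/* Reading the legend above, e.g. DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4)
 * describes (roughly): no compression, sector layout 1, gob-height/page-kind
 * generation 1, page kind 0x7a, and blocks 2^4 = 16 GOBs tall.
 */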
/****************************************************************
 * Log2(block height) ---------------------------------------+ *
 * Page Kind -------------------------------------------+    | *
 * Gob Height/Page Kind Generation -----------------+   |    | *
 * Sector layout --------------------------------+  |   |    | *
 * Compression -------------------------------+  |  |   |    | */
const u64 disp90xx_modifiers[] = { /*         |  |  |   |    | */
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
	DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};