// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__

#include "sde_kms.h"
#include "sde_hw_lm.h"
#include "sde_hw_ctl.h"
#include "sde_hw_cdm.h"
#include "sde_hw_dspp.h"
#include "sde_hw_ds.h"
#include "sde_hw_pingpong.h"
#include "sde_hw_intf.h"
#include "sde_hw_wb.h"
#include "sde_encoder.h"
#include "sde_connector.h"
#include "sde_hw_dsc.h"
#include "sde_crtc.h"

#define RESERVED_BY_OTHER(h, r) \
	(((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) ||\
		((h)->rsvp_nxt && ((h)->rsvp_nxt->enc_id != (r)->enc_id)))

#define RM_RQ_LOCK(r)	((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
#define RM_RQ_CLEAR(r)	((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
#define RM_RQ_DSPP(r)	((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
#define RM_RQ_DS(r)	((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DS))
#define RM_RQ_CWB(r)	((r)->top_ctrl & BIT(SDE_RM_TOPCTL_CWB))
#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
				(t).num_comp_enc == (r).num_enc && \
				(t).num_intf == (r).num_intf)
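
/*
 * Note: RESERVED_BY_OTHER() evaluates true when a block is tagged, by
 * either an active reservation or a pending proposal, for an encoder
 * other than the one in @r. The RM_RQ_*() helpers test the individual
 * TOPOLOGY_CONTROL bits requested for the reservation in @r.
 */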

/**
 * Topology information to be used when the ctl path version does not
 * support driving more than one interface per ctl_path.
 * Table columns: name, num_lm, num_comp_enc, num_intf, num_ctl,
 * needs_split_display (matching the field order of
 * struct sde_rm_topology_def).
 */
static const struct sde_rm_topology_def g_top_table[] = {
	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 2, true  },
	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
};

/**
 * Topology information to be used when the ctl path version
 * is SDE_CTL_CFG_VERSION_1_0_0.
 */
static const struct sde_rm_topology_def g_ctl_ver_1_top_table[] = {
	{   SDE_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
	{   SDE_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
	{   SDE_RM_TOPOLOGY_SINGLEPIPE_DSC,       1, 1, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 1, true  },
	{   SDE_RM_TOPOLOGY_DUALPIPE_DSC,         2, 2, 2, 1, true  },
	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC, 2, 1, 1, 1, false },
	{   SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE,    2, 2, 1, 1, false },
	{   SDE_RM_TOPOLOGY_PPSPLIT,              1, 0, 2, 1, true  },
};
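
/*
 * Worked example of RM_IS_TOPOLOGY_MATCH(): a dual-DSI panel without
 * DSC reports num_lm = 2, num_enc = 0 and num_intf = 2, which matches
 * SDE_RM_TOPOLOGY_DUALPIPE in either table; the two tables differ only
 * in how many CTL paths that topology consumes (two, or one on
 * SDE_CTL_CFG_VERSION_1_0_0 hardware).
 */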

/**
 * struct sde_rm_requirements - Reservation requirements parameter bundle
 * @top_ctrl: topology control preference from kernel client
 * @topology: selected topology for the display
 * @hw_res: Hardware resources required as reported by the encoders
 */
struct sde_rm_requirements {
	uint64_t top_ctrl;
	const struct sde_rm_topology_def *topology;
	struct sde_encoder_hw_resources hw_res;
};

/**
 * struct sde_rm_rsvp - Use Case Reservation tagging structure
 * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain.
 * Tagging blocks, rather than keeping lists of pointers to the HW blocks
 * used, avoids some list management, since we don't know how many blocks
 * of each type a given use case may require.
 * @list:	List head for list of all reservations
 * @seq:	Global RSVP sequence number for debugging, especially for
 *		differentiating different allocations for the same encoder.
 * @enc_id:	Reservations are tracked by Encoder DRM object ID.
 *		CRTCs may be connected to multiple Encoders.
 *		An encoder or connector id identifies the display path.
 * @topology:	DRM<->HW topology use case
 */
struct sde_rm_rsvp {
	struct list_head list;
	uint32_t seq;
	uint32_t enc_id;
	enum sde_rm_topology_name topology;
};

/**
 * struct sde_rm_hw_blk - hardware block tracking list member
 * @list:	List head for list of all hardware blocks tracking items
 * @rsvp:	Pointer to use case reservation if reserved by a client
 * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
 *		request. Will be swapped into rsvp if proposal is accepted
 * @type:	Type of hardware block this structure tracks
 * @id:		Hardware ID number, within its own space, ie. LM_X
 * @hw:		Pointer to the hardware register access object for this block
 */
struct sde_rm_hw_blk {
	struct list_head list;
	struct sde_rm_rsvp *rsvp;
	struct sde_rm_rsvp *rsvp_nxt;
	enum sde_hw_blk_type type;
	uint32_t id;
	struct sde_hw_blk *hw;
};
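
/*
 * Reservation lifecycle, as implemented below: a proposal first tags
 * candidate blocks through rsvp_nxt; if the proposal is accepted,
 * _sde_rm_commit_rsvp() promotes rsvp_nxt to rsvp, and
 * _sde_rm_release_rsvp() later clears both tags when the use case ends.
 */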

/**
 * sde_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
 */
enum sde_rm_dbg_rsvp_stage {
	SDE_RM_STAGE_BEGIN,
	SDE_RM_STAGE_AFTER_CLEAR,
	SDE_RM_STAGE_AFTER_RSVPNEXT,
	SDE_RM_STAGE_FINAL
};

static void _sde_rm_print_rsvps(
		struct sde_rm *rm,
		enum sde_rm_dbg_rsvp_stage stage)
{
	struct sde_rm_rsvp *rsvp;
	struct sde_rm_hw_blk *blk;
	enum sde_hw_blk_type type;

	SDE_DEBUG("%d\n", stage);

	list_for_each_entry(rsvp, &rm->rsvps, list) {
		SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
				rsvp->enc_id, rsvp->topology);
		SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
	}

	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (!blk->rsvp && !blk->rsvp_nxt)
				continue;

			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
				(blk->rsvp) ? blk->rsvp->seq : 0,
				(blk->rsvp) ? blk->rsvp->enc_id : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
				blk->type, blk->id);

			SDE_EVT32(stage,
				(blk->rsvp) ? blk->rsvp->seq : 0,
				(blk->rsvp) ? blk->rsvp->enc_id : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
				blk->type, blk->id);
		}
	}
}

struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
{
	return rm->hw_mdp;
}

void sde_rm_init_hw_iter(
		struct sde_rm_hw_iter *iter,
		uint32_t enc_id,
		enum sde_hw_blk_type type)
{
	memset(iter, 0, sizeof(*iter));
	iter->enc_id = enc_id;
	iter->type = type;
}
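
/*
 * Illustrative sketch (not part of this file): a client such as an
 * encoder typically walks the blocks reserved for it with this
 * iterator, e.g.
 *
 *	struct sde_rm_hw_iter iter;
 *
 *	sde_rm_init_hw_iter(&iter, drm_enc->base.id, SDE_HW_BLK_CTL);
 *	while (sde_rm_get_hw(rm, &iter)) {
 *		struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.hw);
 *		... use ctl ...
 *	}
 *
 * Passing enc_id == 0 instead iterates every block of the given type,
 * reserved or not, as _sde_rm_get_hw_locked() below shows.
 */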

enum sde_rm_topology_name sde_rm_get_topology_name(
	struct msm_display_topology topology)
{
	int i;

	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
			return g_top_table[i].top_name;

	return SDE_RM_TOPOLOGY_NONE;
}

static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
		SDE_ERROR("invalid rm\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	if (i->blk && (&i->blk->list == blk_list)) {
		SDE_DEBUG("attempt resume iteration past last\n");
		return false;
	}

	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		struct sde_rm_rsvp *rsvp = i->blk->rsvp;

		if (i->blk->type != i->type) {
			SDE_ERROR("found incorrect block type %d on %d list\n",
					i->blk->type, i->type);
			return false;
		}

		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
			i->hw = i->blk->hw;
			SDE_DEBUG("found type %d id %d for enc %d\n",
					i->type, i->blk->id, i->enc_id);
			return true;
		}
	}

	SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}

static bool _sde_rm_request_hw_blk_locked(struct sde_rm *rm,
		struct sde_rm_hw_request *hw_blk_info)
{
	struct list_head *blk_list;
	struct sde_rm_hw_blk *blk = NULL;

	if (!rm || !hw_blk_info || hw_blk_info->type >= SDE_HW_BLK_MAX) {
		SDE_ERROR("invalid rm\n");
		return false;
	}

	hw_blk_info->hw = NULL;
	blk_list = &rm->hw_blks[hw_blk_info->type];

	blk = list_prepare_entry(blk, blk_list, list);
	list_for_each_entry_continue(blk, blk_list, list) {
		if (blk->type != hw_blk_info->type) {
			SDE_ERROR("found incorrect block type %d on %d list\n",
					blk->type, hw_blk_info->type);
			return false;
		}

		if (blk->hw->id == hw_blk_info->id) {
			hw_blk_info->hw = blk->hw;
			SDE_DEBUG("found type %d id %d\n",
					blk->type, blk->id);
			return true;
		}
	}

	SDE_DEBUG("no match, type %d id %d\n", hw_blk_info->type,
			hw_blk_info->id);

	return false;
}

bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _sde_rm_get_hw_locked(rm, i);
	mutex_unlock(&rm->rm_lock);

	return ret;
}

bool sde_rm_request_hw_blk(struct sde_rm *rm, struct sde_rm_hw_request *hw)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _sde_rm_request_hw_blk_locked(rm, hw);
	mutex_unlock(&rm->rm_lock);

	return ret;
}
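
/*
 * Illustrative sketch (caller and helper names assumed, not from this
 * file): fetching one specific block by catalog id rather than by
 * reservation, using the request API above:
 *
 *	struct sde_rm_hw_request hw_req = {
 *		.id = INTF_1,
 *		.type = SDE_HW_BLK_INTF,
 *	};
 *
 *	if (sde_rm_request_hw_blk(rm, &hw_req))
 *		intf = to_sde_hw_intf(hw_req.hw);
 */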

static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
	switch (type) {
	case SDE_HW_BLK_LM:
		sde_hw_lm_destroy(hw);
		break;
	case SDE_HW_BLK_DSPP:
		sde_hw_dspp_destroy(hw);
		break;
	case SDE_HW_BLK_DS:
		sde_hw_ds_destroy(hw);
		break;
	case SDE_HW_BLK_CTL:
		sde_hw_ctl_destroy(hw);
		break;
	case SDE_HW_BLK_CDM:
		sde_hw_cdm_destroy(hw);
		break;
	case SDE_HW_BLK_PINGPONG:
		sde_hw_pingpong_destroy(hw);
		break;
	case SDE_HW_BLK_INTF:
		sde_hw_intf_destroy(hw);
		break;
	case SDE_HW_BLK_WB:
		sde_hw_wb_destroy(hw);
		break;
	case SDE_HW_BLK_DSC:
		sde_hw_dsc_destroy(hw);
		break;
	case SDE_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case SDE_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case SDE_HW_BLK_MAX:
	default:
		SDE_ERROR("unsupported block type %d\n", type);
		break;
	}
}

int sde_rm_destroy(struct sde_rm *rm)
{
	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct sde_rm_hw_blk *hw_cur, *hw_nxt;
	enum sde_hw_blk_type type;

	if (!rm) {
		SDE_ERROR("invalid rm\n");
		return -EINVAL;
	}

	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
		list_del(&rsvp_cur->list);
		kfree(rsvp_cur);
	}

	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
				list) {
			list_del(&hw_cur->list);
			_sde_rm_hw_destroy(hw_cur->type, hw_cur->hw);
			kfree(hw_cur);
		}
	}

	sde_hw_mdp_destroy(rm->hw_mdp);
	rm->hw_mdp = NULL;

	mutex_destroy(&rm->rm_lock);

	return 0;
}

static int _sde_rm_hw_blk_create(
		struct sde_rm *rm,
		struct sde_mdss_cfg *cat,
		void __iomem *mmio,
		enum sde_hw_blk_type type,
		uint32_t id,
		void *hw_catalog_info)
{
	struct sde_rm_hw_blk *blk;
	struct sde_hw_mdp *hw_mdp;
	void *hw;

	hw_mdp = rm->hw_mdp;

	switch (type) {
	case SDE_HW_BLK_LM:
		hw = sde_hw_lm_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_DSPP:
		hw = sde_hw_dspp_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_DS:
		hw = sde_hw_ds_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_CTL:
		hw = sde_hw_ctl_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_CDM:
		hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
		break;
	case SDE_HW_BLK_PINGPONG:
		hw = sde_hw_pingpong_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_INTF:
		hw = sde_hw_intf_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_WB:
		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
		break;
	case SDE_HW_BLK_DSC:
		hw = sde_hw_dsc_init(id, mmio, cat);
		break;
	case SDE_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case SDE_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case SDE_HW_BLK_MAX:
	default:
		SDE_ERROR("unsupported block type %d\n", type);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(hw)) {
		SDE_ERROR("failed hw object creation: type %d, err %ld\n",
				type, PTR_ERR(hw));
		return -EFAULT;
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		_sde_rm_hw_destroy(type, hw);
		return -ENOMEM;
	}

	blk->type = type;
	blk->id = id;
	blk->hw = hw;
	list_add_tail(&blk->list, &rm->hw_blks[type]);

	return 0;
}

static int _sde_rm_hw_blk_create_new(struct sde_rm *rm,
			struct sde_mdss_cfg *cat,
			void __iomem *mmio)
{
	int i, rc = 0;

	for (i = 0; i < cat->dspp_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
				cat->dspp[i].id, &cat->dspp[i]);
		if (rc) {
			SDE_ERROR("failed: dspp hw not available\n");
			goto fail;
		}
	}

	if (cat->mdp[0].has_dest_scaler) {
		for (i = 0; i < cat->ds_count; i++) {
			rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DS,
					cat->ds[i].id, &cat->ds[i]);
			if (rc) {
				SDE_ERROR("failed: ds hw not available\n");
				goto fail;
			}
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
				cat->pingpong[i].id, &cat->pingpong[i]);
		if (rc) {
			SDE_ERROR("failed: pp hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->dsc_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSC,
				cat->dsc[i].id, &cat->dsc[i]);
		if (rc) {
			SDE_ERROR("failed: dsc hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->intf_count; i++) {
		if (cat->intf[i].type == INTF_NONE) {
			SDE_DEBUG("skip intf %d with type none\n", i);
			continue;
		}

		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
				cat->intf[i].id, &cat->intf[i]);
		if (rc) {
			SDE_ERROR("failed: intf hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->wb_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
				cat->wb[i].id, &cat->wb[i]);
		if (rc) {
			SDE_ERROR("failed: wb hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->ctl_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
				cat->ctl[i].id, &cat->ctl[i]);
		if (rc) {
			SDE_ERROR("failed: ctl hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->cdm_count; i++) {
		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
				cat->cdm[i].id, &cat->cdm[i]);
		if (rc) {
			SDE_ERROR("failed: cdm hw not available\n");
			goto fail;
		}
	}

fail:
	return rc;
}

int sde_rm_init(struct sde_rm *rm,
		struct sde_mdss_cfg *cat,
		void __iomem *mmio,
		struct drm_device *dev)
{
	int i, rc = 0;
	enum sde_hw_blk_type type;

	if (!rm || !cat || !mmio || !dev) {
		SDE_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	mutex_init(&rm->rm_lock);

	INIT_LIST_HEAD(&rm->rsvps);
	for (type = 0; type < SDE_HW_BLK_MAX; type++)
		INIT_LIST_HEAD(&rm->hw_blks[type]);

	rm->dev = dev;

	if (IS_SDE_CTL_REV_100(cat->ctl_rev))
		rm->topology_tbl = g_ctl_ver_1_top_table;
	else
		rm->topology_tbl = g_top_table;

	/* Some of the sub-blocks require an mdptop to be created */
	rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
		rc = PTR_ERR(rm->hw_mdp);
		rm->hw_mdp = NULL;
		SDE_ERROR("failed: mdp hw not available\n");
		goto fail;
	}

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct sde_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			SDE_ERROR("mixer %d without pingpong\n", lm->id);
			goto fail;
		}

		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
				cat->mixer[i].id, &cat->mixer[i]);
		if (rc) {
			SDE_ERROR("failed: lm hw not available\n");
			goto fail;
		}

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			SDE_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	rc = _sde_rm_hw_blk_create_new(rm, cat, mmio);
	if (!rc)
		return 0;

fail:
	sde_rm_destroy(rm);

	return rc;
}
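
/*
 * A minimal sketch of the expected probe-time call, with names assumed
 * (the actual caller lives in the KMS setup code):
 *
 *	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
 *			sde_kms->dev);
 *	if (rc)
 *		goto err;
 */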

static bool _sde_rm_check_lm(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		const struct sde_lm_cfg *lm_cfg,
		struct sde_rm_hw_blk *lm,
		struct sde_rm_hw_blk **dspp,
		struct sde_rm_hw_blk **ds,
		struct sde_rm_hw_blk **pp)
{
	bool is_valid_dspp, is_valid_ds, ret;

	is_valid_dspp = (lm_cfg->dspp != DSPP_MAX) ? true : false;
	is_valid_ds = (lm_cfg->ds != DS_MAX) ? true : false;

	/*
	 * RM_RQ_X: specification of which LMs to choose
	 * is_valid_X: indicates whether LM is tied with block X
	 * ret: true if given LM matches the user requirement,
	 *      false otherwise
	 */
	if (RM_RQ_DSPP(reqs) && RM_RQ_DS(reqs))
		ret = (is_valid_dspp && is_valid_ds);
	else if (RM_RQ_DSPP(reqs))
		ret = is_valid_dspp;
	else if (RM_RQ_DS(reqs))
		ret = is_valid_ds;
	else
		ret = !(is_valid_dspp || is_valid_ds);

	if (!ret) {
		SDE_DEBUG(
			"fail:lm(%d)req_dspp(%d)dspp(%d)req_ds(%d)ds(%d)\n",
			lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
			lm_cfg->dspp, (bool)(RM_RQ_DS(reqs)),
			lm_cfg->ds);
		return ret;
	}

	return true;
}

static bool _sde_rm_reserve_dspp(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		const struct sde_lm_cfg *lm_cfg,
		struct sde_rm_hw_blk *lm,
		struct sde_rm_hw_blk **dspp)
{
	struct sde_rm_hw_iter iter;

	if (lm_cfg->dspp != DSPP_MAX) {
		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
		while (_sde_rm_get_hw_locked(rm, &iter)) {
			if (iter.blk->id == lm_cfg->dspp) {
				*dspp = iter.blk;
				break;
			}
		}

		if (!*dspp) {
			SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
					lm_cfg->dspp);
			return false;
		}

		if (RESERVED_BY_OTHER(*dspp, rsvp)) {
			SDE_DEBUG("lm %d dspp %d already reserved\n",
					lm->id, (*dspp)->id);
			return false;
		}
	}

	return true;
}

static bool _sde_rm_reserve_ds(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		const struct sde_lm_cfg *lm_cfg,
		struct sde_rm_hw_blk *lm,
		struct sde_rm_hw_blk **ds)
{
	struct sde_rm_hw_iter iter;

	if (lm_cfg->ds != DS_MAX) {
		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DS);
		while (_sde_rm_get_hw_locked(rm, &iter)) {
			if (iter.blk->id == lm_cfg->ds) {
				*ds = iter.blk;
				break;
			}
		}

		if (!*ds) {
			SDE_DEBUG("lm %d failed to retrieve ds %d\n", lm->id,
					lm_cfg->ds);
			return false;
		}

		if (RESERVED_BY_OTHER(*ds, rsvp)) {
			SDE_DEBUG("lm %d ds %d already reserved\n",
					lm->id, (*ds)->id);
			return false;
		}
	}

	return true;
}

static bool _sde_rm_reserve_pp(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		const struct sde_lm_cfg *lm_cfg,
		const struct sde_pingpong_cfg *pp_cfg,
		struct sde_rm_hw_blk *lm,
		struct sde_rm_hw_blk **dspp,
		struct sde_rm_hw_blk **ds,
		struct sde_rm_hw_blk **pp)
{
	struct sde_rm_hw_iter iter;

	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
	while (_sde_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id == lm_cfg->pingpong) {
			*pp = iter.blk;
			break;
		}
	}

	if (!*pp) {
		SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (RESERVED_BY_OTHER(*pp, rsvp)) {
		SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
				(*pp)->id);
		*dspp = NULL;
		*ds = NULL;
		return false;
	}

	pp_cfg = to_sde_hw_pingpong((*pp)->hw)->caps;
	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
		*dspp = NULL;
		*ds = NULL;
		return false;
	}

	return true;
}

/**
 * _sde_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong and dspp.
 * @rm: sde resource manager handle
 * @rsvp: reservation currently being created
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
 *      blocks connected to the lm (pp, dspp) are available and appropriate
 * @dspp: output parameter, dspp block attached to the layer mixer.
 *        NULL if dspp was not available, or not matching requirements.
 * @ds: output parameter, destination scaler block attached to the layer
 *      mixer. NULL if ds was not available, or not matching requirements.
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is compatible with
 *              primary_lm, as well as satisfying all other requirements
 * @Return: true if lm matches all requirements, false otherwise
 */
static bool _sde_rm_check_lm_and_get_connected_blks(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		struct sde_rm_hw_blk *lm,
		struct sde_rm_hw_blk **dspp,
		struct sde_rm_hw_blk **ds,
		struct sde_rm_hw_blk **pp,
		struct sde_rm_hw_blk *primary_lm)
{
	const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
	const struct sde_pingpong_cfg *pp_cfg;
	bool ret;
	u32 display_pref, cwb_pref;

	*dspp = NULL;
	*ds = NULL;
	*pp = NULL;

	display_pref = lm_cfg->features & BIT(SDE_DISP_PRIMARY_PREF) ||
		lm_cfg->features & BIT(SDE_DISP_SECONDARY_PREF);
	cwb_pref = lm_cfg->features & BIT(SDE_DISP_CWB_PREF);

	SDE_DEBUG("check lm %d: dspp %d ds %d pp %d disp_pref: %d cwb_pref%d\n",
		lm_cfg->id, lm_cfg->dspp, lm_cfg->ds,
		lm_cfg->pingpong, display_pref, cwb_pref);

	/* Check if this layer mixer is a peer of the proposed primary LM */
	if (primary_lm) {
		const struct sde_lm_cfg *prim_lm_cfg =
				to_sde_hw_mixer(primary_lm->hw)->cap;

		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
			SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
					prim_lm_cfg->id);
			return false;
		}
	}

	/* Bypass the rest of the checks if an LM for the primary display is found */
	if (!display_pref) {
		/* Check lm for valid requirements */
		ret = _sde_rm_check_lm(rm, rsvp, reqs, lm_cfg, lm,
				dspp, ds, pp);
		if (!ret)
			return ret;

		/*
		 * If CWB is enabled and the LM is not CWB-capable,
		 * then return false.
		 */
		if (RM_RQ_CWB(reqs) && !cwb_pref) {
			SDE_DEBUG("fail: cwb supported lm not allocated\n");
			return false;
		}
	} else if (!(reqs->hw_res.display_type && display_pref)) {
		SDE_DEBUG(
			"display preference is not met. display_type: %d display_pref: %d\n",
			(int)reqs->hw_res.display_type, (int)display_pref);
		return false;
	}

	/* Already reserved? */
	if (RESERVED_BY_OTHER(lm, rsvp)) {
		SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
		return false;
	}

	/* Reserve dspp */
	ret = _sde_rm_reserve_dspp(rm, rsvp, lm_cfg, lm, dspp);
	if (!ret)
		return ret;

	/* Reserve ds */
	ret = _sde_rm_reserve_ds(rm, rsvp, lm_cfg, lm, ds);
	if (!ret)
		return ret;

	/* Reserve pp */
	ret = _sde_rm_reserve_pp(rm, rsvp, reqs, lm_cfg, pp_cfg, lm,
			dspp, ds, pp);
	if (!ret)
		return ret;

	return true;
}

static int _sde_rm_reserve_lms(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		u8 *_lm_ids)
{
	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
	struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
	struct sde_rm_hw_blk *ds[MAX_BLOCKS];
	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
	struct sde_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i, rc = 0;

	if (!reqs->topology->num_lm) {
		SDE_DEBUG("invalid number of lm: %d\n", reqs->topology->num_lm);
		return 0;
	}

	/* Find a primary mixer */
	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
	while (lm_count != reqs->topology->num_lm &&
			_sde_rm_get_hw_locked(rm, &iter_i)) {
		memset(&lm, 0, sizeof(lm));
		memset(&dspp, 0, sizeof(dspp));
		memset(&ds, 0, sizeof(ds));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
			iter_i.blk->id,
			lm_count,
			_lm_ids ? _lm_ids[lm_count] : -1);

		if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
			continue;

		if (!_sde_rm_check_lm_and_get_connected_blks(
				rm, rsvp, reqs, lm[lm_count],
				&dspp[lm_count], &ds[lm_count],
				&pp[lm_count], NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);

		while (lm_count != reqs->topology->num_lm &&
				_sde_rm_get_hw_locked(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_sde_rm_check_lm_and_get_connected_blks(
					rm, rsvp, reqs, iter_j.blk,
					&dspp[lm_count], &ds[lm_count],
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			SDE_DEBUG("blk id = %d, _lm_ids[%d] = %d\n",
				iter_i.blk->id,
				lm_count,
				_lm_ids ? _lm_ids[lm_count] : -1);

			if (_lm_ids && (lm[lm_count])->id != _lm_ids[lm_count])
				continue;

			++lm_count;
		}
	}

	if (lm_count != reqs->topology->num_lm) {
		SDE_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->rsvp_nxt = rsvp;
		pp[i]->rsvp_nxt = rsvp;
		if (dspp[i])
			dspp[i]->rsvp_nxt = rsvp;
		if (ds[i])
			ds[i]->rsvp_nxt = rsvp;
		SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
				dspp[i] ? dspp[i]->id : 0,
				ds[i] ? ds[i]->id : 0);
	}

	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
		/* reserve a free PINGPONG_SLAVE block */
		rc = -ENAVAIL;
		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
		while (_sde_rm_get_hw_locked(rm, &iter_i)) {
			const struct sde_hw_pingpong *pp =
					to_sde_hw_pingpong(iter_i.blk->hw);
			const struct sde_pingpong_cfg *pp_cfg = pp->caps;

			if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
				continue;
			if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
				continue;

			iter_i.blk->rsvp_nxt = rsvp;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int _sde_rm_reserve_ctls(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		const struct sde_rm_topology_def *top,
		u8 *_ctl_ids)
{
	struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
	struct sde_rm_hw_iter iter;
	int i = 0;

	if (!top->num_ctl) {
		SDE_DEBUG("invalid number of ctl: %d\n", top->num_ctl);
		return 0;
	}

	memset(&ctls, 0, sizeof(ctls));

	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
	while (_sde_rm_get_hw_locked(rm, &iter)) {
		const struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.blk->hw);
		unsigned long features = ctl->caps->features;
		bool has_split_display, has_ppsplit, primary_pref;

		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & features;
		has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & features;
		primary_pref = BIT(SDE_CTL_PRIMARY_PREF) & features;

		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

		/*
		 * Bypass the remaining feature checks once a CTL preferred
		 * for primary displays is found.
		 */
		if (!primary_pref && !_ctl_ids) {
			if (top->needs_split_display != has_split_display)
				continue;

			if (top->top_name == SDE_RM_TOPOLOGY_PPSPLIT &&
					!has_ppsplit)
				continue;
		} else if (!(reqs->hw_res.display_type ==
				SDE_CONNECTOR_PRIMARY && primary_pref) &&
				!_ctl_ids) {
			SDE_DEBUG(
				"display pref not met. display_type: %d primary_pref: %d\n",
				reqs->hw_res.display_type, primary_pref);
			continue;
		}

		ctls[i] = iter.blk;

		SDE_DEBUG("blk id = %d, _ctl_ids[%d] = %d\n",
			iter.blk->id, i,
			_ctl_ids ? _ctl_ids[i] : -1);

		if (_ctl_ids && (ctls[i]->id != _ctl_ids[i]))
			continue;

		SDE_DEBUG("ctl %d match\n", iter.blk->id);

		if (++i == top->num_ctl)
			break;
	}

	if (i != top->num_ctl)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
		ctls[i]->rsvp_nxt = rsvp;
		SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
	}

	return 0;
}

static int _sde_rm_reserve_dsc(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		const struct sde_rm_topology_def *top,
		u8 *_dsc_ids)
{
	struct sde_rm_hw_iter iter;
	int alloc_count = 0;
	int num_dsc_enc = top->num_lm;

	if (!top->num_comp_enc)
		return 0;

	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);

	while (_sde_rm_get_hw_locked(rm, &iter)) {
		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		SDE_DEBUG("blk id = %d, _dsc_ids[%d] = %d\n",
			iter.blk->id,
			alloc_count,
			_dsc_ids ? _dsc_ids[alloc_count] : -1);

		if (_dsc_ids && (iter.blk->id != _dsc_ids[alloc_count]))
			continue;

		iter.blk->rsvp_nxt = rsvp;
		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);

		if (++alloc_count == num_dsc_enc)
			return 0;
	}

	SDE_ERROR("couldn't reserve %d dsc blocks for enc id %d\n",
		num_dsc_enc, rsvp->enc_id);

	return -ENAVAIL;
}

static int _sde_rm_reserve_cdm(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		uint32_t id,
		enum sde_hw_blk_type type)
{
	struct sde_rm_hw_iter iter;

	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
	while (_sde_rm_get_hw_locked(rm, &iter)) {
		const struct sde_hw_cdm *cdm = to_sde_hw_cdm(iter.blk->hw);
		const struct sde_cdm_cfg *caps = cdm->caps;
		bool match = false;

		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
			match = test_bit(id, &caps->intf_connect);
		else if (type == SDE_HW_BLK_WB && id != WB_MAX)
			match = test_bit(id, &caps->wb_connect);

		SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
				type, id, caps->intf_connect, caps->wb_connect,
				match);

		if (!match)
			continue;

		iter.blk->rsvp_nxt = rsvp;
		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
		break;
	}

	if (!iter.hw) {
		SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
		return -ENAVAIL;
	}

	return 0;
}

static int _sde_rm_reserve_intf_or_wb(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		uint32_t id,
		enum sde_hw_blk_type type,
		bool needs_cdm)
{
	struct sde_rm_hw_iter iter;
	int ret = 0;

	/* Find the block entry in the rm, and note the reservation */
	sde_rm_init_hw_iter(&iter, 0, type);
	while (_sde_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id != id)
			continue;

		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
			SDE_ERROR("type %d id %d already reserved\n", type, id);
			return -ENAVAIL;
		}

		iter.blk->rsvp_nxt = rsvp;
		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
		break;
	}

	/* Shouldn't happen since wbs / intfs are fixed at probe */
	if (!iter.hw) {
		SDE_ERROR("couldn't find type %d id %d\n", type, id);
		return -EINVAL;
	}

	/* Only one intf or wb is expected to request cdm */
	if (needs_cdm)
		ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);

	return ret;
}

static int _sde_rm_reserve_intf_related_hw(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct sde_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
				SDE_HW_BLK_INTF, hw_res->needs_cdm);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
		if (hw_res->wbs[i] == INTF_MODE_NONE)
			continue;
		id = i + WB_0;
		ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
				SDE_HW_BLK_WB, hw_res->needs_cdm);
		if (ret)
			return ret;
	}

	return ret;
}

static bool _sde_rm_is_display_in_cont_splash(struct sde_kms *sde_kms,
		struct drm_encoder *enc)
{
	int i;
	struct sde_splash_display *splash_dpy;

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_dpy = &sde_kms->splash_data.splash_display[i];
		if (splash_dpy->encoder == enc)
			return splash_dpy->cont_splash_enabled;
	}

	return false;
}

static int _sde_rm_make_lm_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		struct sde_splash_display *splash_display)
{
	int ret, i;
	u8 *hw_ids = NULL;

	/* Check if splash data provided lm_ids */
	if (splash_display) {
		hw_ids = splash_display->lm_ids;
		for (i = 0; i < splash_display->lm_cnt; i++)
			SDE_DEBUG("splash_display->lm_ids[%d] = %d\n",
				i, splash_display->lm_ids[i]);

		if (splash_display->lm_cnt != reqs->topology->num_lm)
			SDE_DEBUG("Configured splash LMs != needed LM cnt\n");
	}

	/*
	 * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
	 * Do assignment preferring to give away low-resource mixers first:
	 * - Check mixers without DSPPs
	 * - Only then allow to grab from mixers with DSPP capability
	 */
	ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
	if (ret && !RM_RQ_DSPP(reqs)) {
		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
		ret = _sde_rm_reserve_lms(rm, rsvp, reqs, hw_ids);
	}

	return ret;
}

static int _sde_rm_make_ctl_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		struct sde_splash_display *splash_display)
{
	int ret, i;
	u8 *hw_ids = NULL;
	struct sde_rm_topology_def topology;

	/* Check if splash data provided ctl_ids */
	if (splash_display) {
		hw_ids = splash_display->ctl_ids;
		for (i = 0; i < splash_display->ctl_cnt; i++)
			SDE_DEBUG("splash_display->ctl_ids[%d] = %d\n",
				i, splash_display->ctl_ids[i]);
	}

	/*
	 * Do assignment preferring to give away low-resource CTLs first:
	 * - Check CTLs without split display capability
	 * - Only then allow to grab from CTLs with split display capability
	 */
	ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, reqs->topology, hw_ids);
	if (ret && !reqs->topology->needs_split_display &&
			reqs->topology->num_ctl > SINGLE_CTL) {
		memcpy(&topology, reqs->topology, sizeof(topology));
		topology.needs_split_display = true;
		ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, &topology, hw_ids);
	}

	return ret;
}

static int _sde_rm_make_dsc_rsvp(struct sde_rm *rm, struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs,
		struct sde_splash_display *splash_display)
{
	int ret, i;
	u8 *hw_ids = NULL;

	/* Check if splash data provided dsc_ids */
	if (splash_display) {
		hw_ids = splash_display->dsc_ids;
		for (i = 0; i < splash_display->dsc_cnt; i++)
			SDE_DEBUG("splash_display->dsc_ids[%d] = %d\n",
				i, splash_display->dsc_ids[i]);
	}

	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs->topology, hw_ids);

	return ret;
}

static int _sde_rm_make_next_rsvp(struct sde_rm *rm, struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct sde_rm_rsvp *rsvp,
		struct sde_rm_requirements *reqs)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct sde_splash_display *splash_display = NULL;
	struct sde_splash_data *splash_data;
	int i, ret;

	priv = enc->dev->dev_private;
	sde_kms = to_sde_kms(priv->kms);
	splash_data = &sde_kms->splash_data;

	if (_sde_rm_is_display_in_cont_splash(sde_kms, enc)) {
		for (i = 0; i < ARRAY_SIZE(splash_data->splash_display); i++) {
			if (enc == splash_data->splash_display[i].encoder)
				splash_display =
					&splash_data->splash_display[i];
		}
		if (!splash_display) {
			SDE_ERROR("rm is in cont_splash but data not found\n");
			return -EINVAL;
		}
	}

	/* Create reservation info, tag reserved blocks with it as we go */
	rsvp->seq = ++rm->rsvp_next_seq;
	rsvp->enc_id = enc->base.id;
	rsvp->topology = reqs->topology->top_name;
	list_add_tail(&rsvp->list, &rm->rsvps);

	ret = _sde_rm_make_lm_rsvp(rm, rsvp, reqs, splash_display);
	if (ret) {
		SDE_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _sde_rm_make_ctl_rsvp(rm, rsvp, reqs, splash_display);
	if (ret) {
		SDE_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	/* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
	ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
	if (ret)
		return ret;

	ret = _sde_rm_make_dsc_rsvp(rm, rsvp, reqs, splash_display);
	if (ret)
		return ret;

	return ret;
}

/**
 * _sde_rm_get_hw_blk_for_cont_splash - retrieve the LM blocks on given CTL
 *	and populate the connected HW blk ids in sde_splash_display
 * @rm: Pointer to resource manager structure
 * @ctl: Pointer to CTL hardware block
 * @splash_display: Pointer to struct sde_splash_display
 * return: number of active LM blocks for this CTL block
 */
static int _sde_rm_get_hw_blk_for_cont_splash(struct sde_rm *rm,
		struct sde_hw_ctl *ctl,
		struct sde_splash_display *splash_display)
{
	u32 lm_reg;
	struct sde_rm_hw_iter iter_lm, iter_pp;
	struct sde_hw_pingpong *pp;

	if (!rm || !ctl || !splash_display) {
		SDE_ERROR("invalid input parameters\n");
		return 0;
	}

	sde_rm_init_hw_iter(&iter_lm, 0, SDE_HW_BLK_LM);
	sde_rm_init_hw_iter(&iter_pp, 0, SDE_HW_BLK_PINGPONG);
	while (_sde_rm_get_hw_locked(rm, &iter_lm)) {
		_sde_rm_get_hw_locked(rm, &iter_pp);

		if (splash_display->lm_cnt >= MAX_DATA_PATH_PER_DSIPLAY)
			break;

		lm_reg = ctl->ops.read_ctl_layers(ctl, iter_lm.blk->id);
		if (!lm_reg)
			continue;

		splash_display->lm_ids[splash_display->lm_cnt++] =
			iter_lm.blk->id;
		SDE_DEBUG("lm_cnt=%d lm_reg[%d]=0x%x\n", splash_display->lm_cnt,
				iter_lm.blk->id - LM_0, lm_reg);

		if (ctl->ops.get_staged_sspp &&
				ctl->ops.get_staged_sspp(ctl, iter_lm.blk->id,
					&splash_display->pipes[
					splash_display->pipe_cnt], 1)) {
			splash_display->pipe_cnt++;
		} else {
			SDE_ERROR("no pipe detected on LM-%d\n",
					iter_lm.blk->id - LM_0);
			return 0;
		}

		pp = to_sde_hw_pingpong(iter_pp.blk->hw);
		if (pp && pp->ops.get_dsc_status &&
				pp->ops.get_dsc_status(pp)) {
			splash_display->dsc_ids[splash_display->dsc_cnt++] =
				iter_pp.blk->id;
			SDE_DEBUG("lm/pp[%d] path, using dsc[%d]\n",
					iter_lm.blk->id - LM_0,
					iter_pp.blk->id - DSC_0);
		}
	}

	return splash_display->lm_cnt;
}

int sde_rm_cont_splash_res_init(struct msm_drm_private *priv,
				struct sde_rm *rm,
				struct sde_splash_data *splash_data,
				struct sde_mdss_cfg *cat)
{
	struct sde_rm_hw_iter iter_c;
	int index = 0, ctl_top_cnt;
	struct sde_kms *sde_kms = NULL;
	struct sde_hw_mdp *hw_mdp;
	struct sde_splash_display *splash_display;
	u8 intf_sel;

	if (!priv || !rm || !cat || !splash_data) {
		SDE_ERROR("invalid input parameters\n");
		return -EINVAL;
	}

	SDE_DEBUG("mixer_count=%d, ctl_count=%d, dsc_count=%d\n",
			cat->mixer_count,
			cat->ctl_count,
			cat->dsc_count);

	ctl_top_cnt = cat->ctl_count;

	if (!priv->kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}
	sde_kms = to_sde_kms(priv->kms);

	hw_mdp = sde_rm_get_mdp(rm);

	sde_rm_init_hw_iter(&iter_c, 0, SDE_HW_BLK_CTL);
	while (_sde_rm_get_hw_locked(rm, &iter_c)) {
		struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter_c.blk->hw);

		if (!ctl->ops.get_ctl_intf) {
			SDE_ERROR("get_ctl_intf not initialized\n");
			return -EINVAL;
		}

		intf_sel = ctl->ops.get_ctl_intf(ctl);
		if (intf_sel) {
			splash_display = &splash_data->splash_display[index];
			SDE_DEBUG("finding resources for display=%d ctl=%d\n",
					index, iter_c.blk->id - CTL_0);

			_sde_rm_get_hw_blk_for_cont_splash(rm,
					ctl, splash_display);
			splash_display->cont_splash_enabled = true;
			splash_display->ctl_ids[splash_display->ctl_cnt++] =
				iter_c.blk->id;

			if (hw_mdp && hw_mdp->ops.get_split_flush_status) {
				splash_display->single_flush_en =
					hw_mdp->ops.get_split_flush_status(
							hw_mdp);
			}

			if (!splash_display->single_flush_en ||
					(iter_c.blk->id != CTL_0))
				index++;

			if (index >= ARRAY_SIZE(splash_data->splash_display))
				break;
		}
	}

	if (index != splash_data->num_splash_displays) {
		SDE_DEBUG("mismatch in active displays vs enabled: %d/%d\n",
				splash_data->num_splash_displays, index);
		return -EINVAL;
	}

	return 0;
}

static int _sde_rm_populate_requirements(
		struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct sde_rm_requirements *reqs)
{
	const struct drm_display_mode *mode = &crtc_state->mode;
	int i;

	memset(reqs, 0, sizeof(*reqs));

	reqs->top_ctrl = sde_connector_get_property(conn_state,
			CONNECTOR_PROP_TOPOLOGY_CONTROL);
	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);

	for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++) {
		if (RM_IS_TOPOLOGY_MATCH(rm->topology_tbl[i],
					reqs->hw_res.topology)) {
			reqs->topology = &rm->topology_tbl[i];
			break;
		}
	}

	if (!reqs->topology) {
		SDE_ERROR("invalid topology for the display\n");
		return -EINVAL;
	}

	/*
	 * Select the dspp HW block for all DSI displays, and select ds
	 * only for the primary DSI display.
	 */
	if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI) {
		if (!RM_RQ_DSPP(reqs))
			reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);

		if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
				sde_encoder_is_primary_display(enc))
			reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS);
	}

	/*
	 * Require an LM with CWB support if CWB is found enabled.
	 */
	if (!RM_RQ_CWB(reqs) && sde_encoder_in_clone_mode(enc)) {
		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_CWB);

		/*
		 * Topology selection based on conn mode is not valid for CWB,
		 * as the WB conn populates modes based on the max_mixer_width
		 * check while the primary display can be using dual LMs. This
		 * topology override for CWB checks the number of datapaths
		 * active on the primary and reserves the same number of LM/PP
		 * blocks for CWB.
		 */
		reqs->topology =
			&rm->topology_tbl[SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE];
		if (sde_crtc_get_num_datapath(crtc_state->crtc) == 1)
			reqs->topology =
				&rm->topology_tbl[SDE_RM_TOPOLOGY_SINGLEPIPE];
	}

	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
			reqs->hw_res.display_num_of_h_tiles);
	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
			reqs->topology->num_lm, reqs->topology->num_ctl,
			reqs->topology->top_name,
			reqs->topology->needs_split_display);
	SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->topology->num_lm,
			reqs->top_ctrl, reqs->topology->top_name,
			reqs->topology->num_ctl);

	return 0;
}

static struct sde_rm_rsvp *_sde_rm_get_rsvp(
		struct sde_rm *rm,
		struct drm_encoder *enc)
{
	struct sde_rm_rsvp *i;

	if (!rm || !enc) {
		SDE_ERROR("invalid params\n");
		return NULL;
	}

	if (list_empty(&rm->rsvps))
		return NULL;

	list_for_each_entry(i, &rm->rsvps, list)
		if (i->enc_id == enc->base.id)
			return i;

	return NULL;
}
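
/*
 * _sde_rm_get_rsvp_nxt() below returns the second reservation tagged
 * with this encoder id, i.e. the pending proposal queued behind the
 * active reservation, or NULL if no such proposal exists.
 */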
static struct sde_rm_rsvp *_sde_rm_get_rsvp_nxt(
		struct sde_rm *rm,
		struct drm_encoder *enc)
{
	struct sde_rm_rsvp *i;

	if (list_empty(&rm->rsvps))
		return NULL;

	list_for_each_entry(i, &rm->rsvps, list)
		if (i->enc_id == enc->base.id)
			break;

	list_for_each_entry_continue(i, &rm->rsvps, list)
		if (i->enc_id == enc->base.id)
			return i;

	return NULL;
}

static struct drm_connector *_sde_rm_get_connector(
		struct drm_encoder *enc)
{
	struct drm_connector *conn = NULL;
	struct list_head *connector_list =
			&enc->dev->mode_config.connector_list;

	list_for_each_entry(conn, connector_list, head)
		if (conn->encoder == enc)
			return conn;

	return NULL;
}

int sde_rm_update_topology(struct drm_connector_state *conn_state,
		struct msm_display_topology *topology)
{
	int i, ret = 0;
	struct msm_display_topology top;
	enum sde_rm_topology_name top_name = SDE_RM_TOPOLOGY_NONE;

	if (!conn_state)
		return -EINVAL;

	if (topology) {
		top = *topology;
		for (i = 0; i < SDE_RM_TOPOLOGY_MAX; i++)
			if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], top)) {
				top_name = g_top_table[i].top_name;
				break;
			}
	}

	ret = msm_property_set_property(
			sde_connector_get_propinfo(conn_state->connector),
			sde_connector_get_property_state(conn_state),
			CONNECTOR_PROP_TOPOLOGY_NAME, top_name);

	return ret;
}
/**
 * _sde_rm_release_rsvp - release a reservation and the hw blocks tied to it
 * @rm: KMS handle
 * @rsvp: reservation to remove from the list and release resources for
 * @conn: connector the reservation was made for (may be NULL)
 */
static void _sde_rm_release_rsvp(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct drm_connector *conn)
{
	struct sde_rm_rsvp *rsvp_c, *rsvp_n;
	struct sde_rm_hw_blk *blk;
	enum sde_hw_blk_type type;

	if (!rsvp)
		return;

	SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);

	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
		if (rsvp == rsvp_c) {
			list_del(&rsvp_c->list);
			break;
		}
	}

	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp == rsvp) {
				blk->rsvp = NULL;
				SDE_DEBUG("rel rsvp %d enc %d %d %d\n",
						rsvp->seq, rsvp->enc_id,
						blk->type, blk->id);
			}
			if (blk->rsvp_nxt == rsvp) {
				blk->rsvp_nxt = NULL;
				SDE_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
						rsvp->seq, rsvp->enc_id,
						blk->type, blk->id);
			}
		}
	}

	kfree(rsvp);
}
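
/**
 * sde_rm_release - release the reservation held by an encoder
 * @rm: KMS handle
 * @enc: DRM encoder whose reservation is released
 * @nxt: true to release the proposed (next) reservation instead of the
 *	active one
 *
 * Displays still in continuous splash are released unconditionally;
 * otherwise resources are kept when the connector's TOPOLOGY_CONTROL
 * property has RESERVE_LOCK set.
 */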
void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt)
{
	struct sde_rm_rsvp *rsvp;
	struct drm_connector *conn = NULL;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	uint64_t top_ctrl;

	if (!rm || !enc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	priv = enc->dev->dev_private;
	if (!priv->kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}
	sde_kms = to_sde_kms(priv->kms);

	mutex_lock(&rm->rm_lock);

	if (nxt)
		rsvp = _sde_rm_get_rsvp_nxt(rm, enc);
	else
		rsvp = _sde_rm_get_rsvp(rm, enc);
	if (!rsvp) {
		SDE_DEBUG("failed to find rsvp for enc %d, nxt %d\n",
				enc->base.id, nxt);
		goto end;
	}

	if (_sde_rm_is_display_in_cont_splash(sde_kms, enc)) {
		_sde_rm_release_rsvp(rm, rsvp, conn);
		goto end;
	}

	conn = _sde_rm_get_connector(enc);
	if (!conn) {
		SDE_ERROR("failed to get connector for enc %d, nxt %d\n",
				enc->base.id, nxt);
		goto end;
	}

	top_ctrl = sde_connector_get_property(conn->state,
			CONNECTOR_PROP_TOPOLOGY_CONTROL);

	if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
		SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
				rsvp->seq, rsvp->enc_id);
	} else {
		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
				rsvp->enc_id);
		_sde_rm_release_rsvp(rm, rsvp, conn);
	}

end:
	mutex_unlock(&rm->rm_lock);
}
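
/**
 * _sde_rm_commit_rsvp - promote the proposed reservation to active
 * @rm: KMS handle
 * @rsvp: reservation being committed, used here for logging
 * @conn_state: connector state for the reservation (currently unused)
 *
 * For every hw block with a pending rsvp_nxt, move it into rsvp so the
 * proposed reservation becomes the active one.
 */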
static int _sde_rm_commit_rsvp(
		struct sde_rm *rm,
		struct sde_rm_rsvp *rsvp,
		struct drm_connector_state *conn_state)
{
	struct sde_rm_hw_blk *blk;
	enum sde_hw_blk_type type;
	int ret = 0;

	/* Swap next rsvp to be the active */
	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp_nxt) {
				blk->rsvp = blk->rsvp_nxt;
				blk->rsvp_nxt = NULL;
			}
		}
	}

	if (!ret) {
		SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
				rsvp->topology);
		SDE_EVT32(rsvp->enc_id, rsvp->topology);
	}

	return ret;
}
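
/**
 * sde_rm_reserve - reserve hw blocks for the given encoder/crtc/connector
 * @rm: KMS handle
 * @enc: DRM encoder the reservation is made for
 * @crtc_state: proposed CRTC state
 * @conn_state: proposed connector state
 * @test_only: true during atomic_check; the reservation is validated and
 *	then dropped unless the user requested LOCK via TOPOLOGY_CONTROL
 *
 * Return: 0 on success or if no modeset is needed, negative error code on
 * invalid arguments, allocation failure, or reservation failure
 */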
int sde_rm_reserve(
		struct sde_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		bool test_only)
{
	struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct sde_rm_requirements reqs;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	int ret;

	if (!rm || !enc || !crtc_state || !conn_state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	if (!enc->dev || !enc->dev->dev_private) {
		SDE_ERROR("drm device invalid\n");
		return -EINVAL;
	}
	priv = enc->dev->dev_private;
	if (!priv->kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}
	sde_kms = to_sde_kms(priv->kms);

	/* Check if this is just a page-flip */
	if (!_sde_rm_is_display_in_cont_splash(sde_kms, enc) &&
			!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
			conn_state->connector->base.id, enc->base.id,
			crtc_state->crtc->base.id, test_only);
	SDE_EVT32(enc->base.id, conn_state->connector->base.id);

	mutex_lock(&rm->rm_lock);

	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);

	rsvp_cur = _sde_rm_get_rsvp(rm, enc);
	rsvp_nxt = _sde_rm_get_rsvp_nxt(rm, enc);

	if (!test_only && rsvp_nxt)
		goto commit_rsvp;

	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
			conn_state, &reqs);
	if (ret) {
		SDE_ERROR("failed to populate hw requirements\n");
		goto end;
	}

	/*
	 * We only support one active reservation per hw block. But to
	 * implement transactional semantics for test-only, and to allow
	 * failure while modifying an existing reservation, over the course
	 * of this function we can have two reservations:
	 * Current: the existing reservation.
	 * Next: the proposed reservation, which may fail, or may be
	 * discarded if in test-only mode.
	 * If the reservation succeeds and we are not in test-only mode,
	 * the current reservation is replaced with the next.
	 */
	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
	if (!rsvp_nxt) {
		ret = -ENOMEM;
		goto end;
	}

	/*
	 * User can request that we clear out any reservation during the
	 * atomic_check phase by using this CLEAR bit
	 */
	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
		SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
				rsvp_cur->seq, rsvp_cur->enc_id);
		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
		rsvp_cur = NULL;
		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
	}

	/* Check the proposed reservation, store it in hw's "next" field */
	ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
			rsvp_nxt, &reqs);

	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);

	if (ret) {
		SDE_ERROR("failed to reserve hw resources: %d, test_only %d\n",
				ret, test_only);
		_sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
		goto end;
	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
		/*
		 * Normally, if test_only, test the reservation and then undo.
		 * However, if the user requests LOCK, keep the reservation
		 * made during the atomic_check phase.
		 */
		SDE_DEBUG("test_only: rsvp[s%de%d]\n",
				rsvp_nxt->seq, rsvp_nxt->enc_id);
		goto end;
	} else {
		if (test_only && RM_RQ_LOCK(&reqs))
			SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
					rsvp_nxt->seq, rsvp_nxt->enc_id);
	}

commit_rsvp:
	_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
	ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);

end:
	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
	mutex_unlock(&rm->rm_lock);

	return ret;
}
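
/**
 * sde_rm_ext_blk_create_reserve - track and reserve an external hw block
 * @rm: KMS handle
 * @hw: externally created hw block to track
 * @enc: DRM encoder the block is tied to
 *
 * Create a reservation for @enc if one does not exist yet, then add @hw
 * to the resource manager's block list under that reservation.
 *
 * Return: 0 on success, -EINVAL on invalid arguments, -ENOMEM on
 * allocation failure
 */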
int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
		struct sde_hw_blk *hw, struct drm_encoder *enc)
{
	struct sde_rm_hw_blk *blk;
	struct sde_rm_rsvp *rsvp;
	int ret = 0;

	if (!rm || !hw || !enc) {
		SDE_ERROR("invalid parameters\n");
		return -EINVAL;
	}

	if (hw->type >= SDE_HW_BLK_MAX) {
		SDE_ERROR("invalid HW type\n");
		return -EINVAL;
	}

	mutex_lock(&rm->rm_lock);

	rsvp = _sde_rm_get_rsvp(rm, enc);
	if (!rsvp) {
		rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
		if (!rsvp) {
			ret = -ENOMEM;
			goto end;
		}

		rsvp->seq = ++rm->rsvp_next_seq;
		rsvp->enc_id = enc->base.id;
		list_add_tail(&rsvp->list, &rm->rsvps);

		SDE_DEBUG("create rsvp %d for enc %d\n",
				rsvp->seq, rsvp->enc_id);
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		ret = -ENOMEM;
		goto end;
	}

	blk->type = hw->type;
	blk->id = hw->id;
	blk->hw = hw;
	blk->rsvp = rsvp;
	list_add_tail(&blk->list, &rm->hw_blks[hw->type]);

	SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
			rsvp->seq, rsvp->enc_id);

end:
	mutex_unlock(&rm->rm_lock);
	return ret;
}
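
/**
 * sde_rm_ext_blk_destroy - free the external hw blocks tied to an encoder
 * @rm: KMS handle
 * @enc: DRM encoder whose blocks and reservation are torn down
 *
 * Remove and free every hw block owned by the encoder's reservation,
 * then delete and free the reservation itself.
 *
 * Return: 0 on success, -EINVAL on invalid arguments, -ENOENT if no
 * reservation exists for @enc
 */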
int sde_rm_ext_blk_destroy(struct sde_rm *rm,
		struct drm_encoder *enc)
{
	struct sde_rm_hw_blk *blk = NULL, *p;
	struct sde_rm_rsvp *rsvp;
	enum sde_hw_blk_type type;
	int ret = 0;

	if (!rm || !enc) {
		SDE_ERROR("invalid parameters\n");
		return -EINVAL;
	}

	mutex_lock(&rm->rm_lock);

	rsvp = _sde_rm_get_rsvp(rm, enc);
	if (!rsvp) {
		ret = -ENOENT;
		SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
		goto end;
	}

	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(blk, p, &rm->hw_blks[type], list) {
			if (blk->rsvp == rsvp) {
				list_del(&blk->list);
				SDE_DEBUG("del blk %d %d from rsvp %d enc %d\n",
						blk->type, blk->id,
						rsvp->seq, rsvp->enc_id);
				kfree(blk);
			}
		}
	}

	SDE_DEBUG("del rsvp %d\n", rsvp->seq);
	list_del(&rsvp->list);
	kfree(rsvp);

end:
	mutex_unlock(&rm->rm_lock);
	return ret;
}