/*
 * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/of_address.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "sde_wb.h"
#include "sde_dbg.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	4
#define MSM_VERSION_PATCHLEVEL	0

#define LASTCLOSE_TIMEOUT_MS	500

#define msm_wait_event_timeout(waitq, cond, timeout_ms, ret) \
    do { \
        ktime_t cur_ktime; \
        ktime_t exp_ktime; \
        s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms); \
\
        exp_ktime = ktime_add_ms(ktime_get(), timeout_ms); \
        do { \
            ret = wait_event_timeout(waitq, cond, \
                    wait_time_jiffies); \
            cur_ktime = ktime_get(); \
        } while ((!cond) && (ret == 0) && \
            (ktime_compare_safe(exp_ktime, cur_ktime) > 0)); \
    } while (0)

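/*
 * The inner loop guards against spurious wakeups: the wait is retried
 * until the condition holds, wait_event_timeout() times out, or the
 * absolute deadline computed up front expires. Illustrative usage,
 * mirroring the calls in msm_lastclose() below ("rc" receives the
 * wait_event_timeout() return value, 0 meaning the wait timed out):
 *
 *    int rc;
 *
 *    msm_wait_event_timeout(priv->pending_crtcs_event,
 *            !priv->pending_crtcs, LASTCLOSE_TIMEOUT_MS, rc);
 *    if (!rc)
 *        DRM_INFO("timed out waiting for pending crtcs\n");
 */
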
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
    struct msm_drm_private *priv = NULL;

    if (!dev) {
        DRM_ERROR("output_poll_changed failed, invalid input\n");
        return;
    }

    priv = dev->dev_private;
    if (priv->fbdev)
        drm_fb_helper_hotplug_event(priv->fbdev);
}

/**
 * msm_atomic_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a wrapper for drm_atomic_helper_check() that performs the modeset
 * and plane state checking. Additionally, it checks whether any secure
 * transition (moving CRTCs and planes between secure and non-secure states,
 * and vice versa) is allowed. When entering the secure state, only planes
 * with an fb_mode of "dir translated" may be staged on the CRTC, and only
 * one CRTC may be active. Mixing secure and non-secure planes is not
 * allowed.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_check(struct drm_device *dev,
        struct drm_atomic_state *state)
{
    struct msm_drm_private *priv;

    priv = dev->dev_private;
    if (priv && priv->kms && priv->kms->funcs &&
            priv->kms->funcs->atomic_check)
        return priv->kms->funcs->atomic_check(priv->kms, state);

    return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
    .fb_create = msm_framebuffer_create,
    .output_poll_changed = msm_fb_output_poll_changed,
    .atomic_check = msm_atomic_check,
    .atomic_commit = msm_atomic_commit,
    .atomic_state_alloc = msm_atomic_state_alloc,
    .atomic_state_clear = msm_atomic_state_clear,
    .atomic_state_free = msm_atomic_state_free,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
    .atomic_commit_tail = msm_atomic_commit_tail,
};

#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_FBDEV_EMULATION
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

/*
 * Util/helpers:
 */

int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
{
    struct property *prop;
    const char *name;
    struct clk_bulk_data *local;
    int i = 0, ret, count;

    count = of_property_count_strings(dev->of_node, "clock-names");
    if (count < 1)
        return 0;

    local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
            GFP_KERNEL);
    if (!local)
        return -ENOMEM;

    of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
        local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
        if (!local[i].id) {
            devm_kfree(dev, local);
            return -ENOMEM;
        }

        i++;
    }

    ret = devm_clk_bulk_get(dev, count, local);
    if (ret) {
        for (i = 0; i < count; i++)
            devm_kfree(dev, (void *) local[i].id);
        devm_kfree(dev, local);

        return ret;
    }

    *bulk = local;
    return count;
}

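/*
 * Illustrative (hypothetical) device-tree snippet consumed by
 * msm_clk_bulk_get() above; one bulk entry is created per string in
 * "clock-names", so the returned count matches the property length:
 *
 *    mdss_mdp: mdss_mdp@ae00000 {
 *        clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
 *                 <&dispcc DISP_CC_MDSS_MDP_CLK>;
 *        clock-names = "iface", "core";
 *    };
 */
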
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
        const char *name)
{
    int i;
    char n[32];

    snprintf(n, sizeof(n), "%s_clk", name);

    for (i = 0; bulk && i < count; i++) {
        if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
            return bulk[i].clk;
    }

    return NULL;
}

struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
    struct clk *clk;
    char name2[32];

    clk = devm_clk_get(&pdev->dev, name);
    if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
        return clk;

    snprintf(name2, sizeof(name2), "%s_clk", name);

    clk = devm_clk_get(&pdev->dev, name2);
    if (!IS_ERR(clk))
        dev_warn(&pdev->dev,
                "Using legacy clk name binding. Use \"%s\" instead of \"%s\"\n",
                name, name2);

    return clk;
}

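/*
 * Illustrative note on the fallback above: msm_clk_get(pdev, "iface")
 * first tries the canonical name "iface"; if that lookup fails (and the
 * failure is not a probe deferral), it retries the legacy name
 * "iface_clk" and warns so the DT can be migrated to the new binding.
 * "iface" here is just an example clock name, not one this file
 * requires.
 */
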
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
        const char *dbgname)
{
    struct resource *res;
    unsigned long size;
    void __iomem *ptr;

    if (name)
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
    else
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (!res) {
        dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
                name);
        return ERR_PTR(-EINVAL);
    }

    size = resource_size(res);
    ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
    if (!ptr) {
        dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
        return ERR_PTR(-ENOMEM);
    }

    if (reglog)
        dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
                dbgname, ptr, size);

    return ptr;
}

unsigned long msm_iomap_size(struct platform_device *pdev, const char *name)
{
    struct resource *res;

    if (name)
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
    else
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (!res) {
        dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
                name);
        return 0;
    }

    return resource_size(res);
}

void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
{
    devm_iounmap(&pdev->dev, addr);
}

void msm_writel(u32 data, void __iomem *addr)
{
    if (reglog)
        pr_debug("IO:W %pK %08x\n", addr, data);
    writel(data, addr);
}

u32 msm_readl(const void __iomem *addr)
{
    u32 val = readl(addr);

    if (reglog)
        pr_err("IO:R %pK %08x\n", addr, val);

    return val;
}

int msm_get_src_bpc(int chroma_format, int bpc)
{
    int src_bpp;

    switch (chroma_format) {
    case MSM_CHROMA_444:
        src_bpp = bpc * 3;
        break;
    case MSM_CHROMA_422:
        src_bpp = bpc * 2;
        break;
    case MSM_CHROMA_420:
        src_bpp = mult_frac(bpc, 3, 2);
        break;
    default:
        src_bpp = bpc * 3;
        break;
    }

    return src_bpp;
}

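/*
 * Worked example for msm_get_src_bpc(): with bpc = 8, 4:4:4 gives
 * 8 * 3 = 24 bpp, 4:2:2 gives 8 * 2 = 16 bpp, and 4:2:0 gives
 * mult_frac(8, 3, 2) = 12 bpp, matching the usual chroma subsampling
 * ratios.
 */
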
struct vblank_work {
    struct kthread_work work;
    int crtc_id;
    bool enable;
    struct msm_drm_private *priv;
};

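/*
 * Vblank on/off requests are queued to the per-CRTC event thread instead
 * of being handled inline: the DRM core may invoke the enable_vblank()/
 * disable_vblank() hooks from atomic context (under the vblank spinlock),
 * which is also why vblank_ctrl_queue_work() below has to allocate with
 * GFP_ATOMIC.
 */
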
static void vblank_ctrl_worker(struct kthread_work *work)
{
    struct vblank_work *cur_work = container_of(work,
            struct vblank_work, work);
    struct msm_drm_private *priv = cur_work->priv;
    struct msm_kms *kms = priv->kms;

    if (cur_work->enable)
        kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
    else
        kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);

    kfree(cur_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
        int crtc_id, bool enable)
{
    struct vblank_work *cur_work;
    struct kthread_worker *worker;

    if (!priv || crtc_id >= priv->num_crtcs)
        return -EINVAL;

    cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
    if (!cur_work)
        return -ENOMEM;

    kthread_init_work(&cur_work->work, vblank_ctrl_worker);
    cur_work->crtc_id = crtc_id;
    cur_work->enable = enable;
    cur_work->priv = priv;

    worker = &priv->event_thread[crtc_id].worker;
    kthread_queue_work(worker, &cur_work->work);

    return 0;
}

static int msm_drm_uninit(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct drm_device *ddev = platform_get_drvdata(pdev);
    struct msm_drm_private *priv = ddev->dev_private;
    struct msm_kms *kms = priv->kms;
    int i;

    /* We must cancel and cleanup any pending vblank enable/disable
     * work before drm_irq_uninstall() to avoid work re-enabling an
     * irq after uninstall has disabled it.
     */
    flush_workqueue(priv->wq);
    destroy_workqueue(priv->wq);

    /* clean up display commit/event worker threads */
    for (i = 0; i < priv->num_crtcs; i++) {
        if (priv->disp_thread[i].thread) {
            kthread_flush_worker(&priv->disp_thread[i].worker);
            kthread_stop(priv->disp_thread[i].thread);
            priv->disp_thread[i].thread = NULL;
        }

        if (priv->event_thread[i].thread) {
            kthread_flush_worker(&priv->event_thread[i].worker);
            kthread_stop(priv->event_thread[i].thread);
            priv->event_thread[i].thread = NULL;
        }
    }

    drm_kms_helper_poll_fini(ddev);

    if (priv->registered) {
        drm_dev_unregister(ddev);
        priv->registered = false;
    }

#ifdef CONFIG_DRM_FBDEV_EMULATION
    if (fbdev && priv->fbdev)
        msm_fbdev_free(ddev);
#endif

    drm_atomic_helper_shutdown(ddev);
    drm_mode_config_cleanup(ddev);

    pm_runtime_get_sync(dev);
    drm_irq_uninstall(ddev);
    pm_runtime_put_sync(dev);

    if (kms && kms->funcs)
        kms->funcs->destroy(kms);

    if (priv->vram.paddr) {
        unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

        drm_mm_takedown(&priv->vram.mm);
        dma_free_attrs(dev, priv->vram.size, NULL,
                priv->vram.paddr, attrs);
    }

    component_unbind_all(dev, ddev);

    sde_dbg_destroy();
    debugfs_remove_recursive(priv->debug_root);

    sde_power_resource_deinit(pdev, &priv->phandle);

    msm_mdss_destroy(ddev);

    ddev->dev_private = NULL;
    kfree(priv);

    drm_dev_put(ddev);

    return 0;
}

#define KMS_MDP4 4
#define KMS_MDP5 5
#define KMS_SDE  3

static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
    static const struct of_device_id match_types[] = { {
        .compatible = "qcom,mdss_mdp",
        .data = (void *)KMS_MDP5,
    },
    {
        .compatible = "qcom,sde-kms",
        .data = (void *)KMS_SDE,
    },
    {} };
    struct device *dev = &pdev->dev;
    const struct of_device_id *match;

    match = of_match_node(match_types, dev->of_node);
    if (match)
        return (int)(unsigned long)match->data;
#endif
    return KMS_MDP4;
}

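/*
 * For example, a DT node with compatible = "qcom,sde-kms" resolves to
 * KMS_SDE above, "qcom,mdss_mdp" to KMS_MDP5, and anything else (or a
 * non-CONFIG_OF build) falls back to KMS_MDP4.
 */
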
static int msm_init_vram(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct device_node *node;
    unsigned long size = 0;
    int ret = 0;

    /* In the device-tree world, we could have a 'memory-region'
     * phandle, which gives us a link to our "vram". Allocating
     * is all nicely abstracted behind the dma api, but we need
     * to know the entire size to allocate it all in one go. There
     * are two cases:
     *  1) device with no IOMMU, in which case we need exclusive
     *     access to a VRAM carveout big enough for all gpu
     *     buffers
     *  2) device with IOMMU, but where the bootloader puts up
     *     a splash screen. In this case, the VRAM carveout
     *     need only be large enough for fbdev fb. But we need
     *     exclusive access to the buffer to avoid the kernel
     *     using those pages for other purposes (which appears
     *     as corruption on screen before we have a chance to
     *     load and do initial modeset)
     */
    node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
    if (node) {
        struct resource r;

        ret = of_address_to_resource(node, 0, &r);
        of_node_put(node);
        if (ret)
            return ret;

        size = r.end - r.start;
        DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

    /* if we have no IOMMU, then we need to use carveout allocator.
     * Grab the entire CMA chunk carved out in early startup in
     * mach-msm:
     */
    } else if (!iommu_present(&platform_bus_type)) {
        u32 vram_size;

        ret = of_property_read_u32(dev->dev->of_node,
                "qcom,vram-size", &vram_size);
        size = (ret < 0) ? memparse(vram, NULL) : vram_size;
        DRM_INFO("using 0x%lx VRAM carveout\n", size);
        ret = 0;
    }

    if (size) {
        unsigned long attrs = 0;
        void *p;

        priv->vram.size = size;
        drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
        spin_lock_init(&priv->vram.lock);

        attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
        attrs |= DMA_ATTR_WRITE_COMBINE;

        /* note that for no-kernel-mapping, the vaddr returned
         * is bogus, but non-null if allocation succeeded:
         */
        p = dma_alloc_attrs(dev->dev, size,
                &priv->vram.paddr, GFP_KERNEL, attrs);
        if (!p) {
            dev_err(dev->dev, "failed to allocate VRAM\n");
            priv->vram.paddr = 0;
            return -ENOMEM;
        }

        dev_info(dev->dev, "VRAM: %08x->%08x\n",
                (uint32_t)priv->vram.paddr,
                (uint32_t)(priv->vram.paddr + size));
    }

    return ret;
}

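/*
 * On IOMMU-less targets without a "qcom,vram-size" DT property, the
 * carveout size above comes from the "vram" module parameter via
 * memparse(), which accepts the usual size suffixes: "16m" (the
 * default), "64m", "1g", and so on.
 */
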
#ifdef CONFIG_OF
static int msm_component_bind_all(struct device *dev,
        struct drm_device *drm_dev)
{
    int ret;

    ret = component_bind_all(dev, drm_dev);
    if (ret)
        DRM_ERROR("component_bind_all failed: %d\n", ret);

    return ret;
}
#else
static int msm_component_bind_all(struct device *dev,
        struct drm_device *drm_dev)
{
    return 0;
}
#endif

static int msm_drm_display_thread_create(struct sched_param param,
        struct msm_drm_private *priv, struct drm_device *ddev,
        struct device *dev)
{
    int i, ret = 0;

    /*
     * This priority was found during empirical testing to give
     * appropriate realtime scheduling to process display updates and
     * interact with other real-time and normal-priority tasks.
     */
    param.sched_priority = 16;

    for (i = 0; i < priv->num_crtcs; i++) {
        /* initialize display thread */
        priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
        kthread_init_worker(&priv->disp_thread[i].worker);
        priv->disp_thread[i].dev = ddev;
        priv->disp_thread[i].thread =
            kthread_run(kthread_worker_fn,
                &priv->disp_thread[i].worker,
                "crtc_commit:%d", priv->disp_thread[i].crtc_id);
        if (IS_ERR(priv->disp_thread[i].thread)) {
            dev_err(dev, "failed to create crtc_commit kthread\n");
            priv->disp_thread[i].thread = NULL;
        } else {
            ret = sched_setscheduler(priv->disp_thread[i].thread,
                    SCHED_FIFO, &param);
            if (ret)
                pr_warn("display thread priority update failed: %d\n",
                        ret);
        }

        /* initialize event thread */
        priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
        kthread_init_worker(&priv->event_thread[i].worker);
        priv->event_thread[i].dev = ddev;
        priv->event_thread[i].thread =
            kthread_run(kthread_worker_fn,
                &priv->event_thread[i].worker,
                "crtc_event:%d", priv->event_thread[i].crtc_id);
        /*
         * The event thread should run at the same priority as the
         * disp_thread because it handles frame_done events. A lower
         * priority event thread combined with a higher priority
         * disp_thread can push the frame_pending counters beyond 2,
         * which can lead to commit failure at the crtc commit level.
         */
        if (IS_ERR(priv->event_thread[i].thread)) {
            dev_err(dev, "failed to create crtc_event kthread\n");
            priv->event_thread[i].thread = NULL;
        } else {
            ret = sched_setscheduler(priv->event_thread[i].thread,
                    SCHED_FIFO, &param);
            if (ret)
                pr_warn("display event thread priority update failed: %d\n",
                        ret);
        }

        if ((!priv->disp_thread[i].thread) ||
                !priv->event_thread[i].thread) {
            /* clean up previously created threads if any */
            for ( ; i >= 0; i--) {
                if (priv->disp_thread[i].thread) {
                    kthread_stop(
                        priv->disp_thread[i].thread);
                    priv->disp_thread[i].thread = NULL;
                }

                if (priv->event_thread[i].thread) {
                    kthread_stop(
                        priv->event_thread[i].thread);
                    priv->event_thread[i].thread = NULL;
                }
            }
            return -EINVAL;
        }
    }

    /*
     * Since the pp interrupt is heavyweight, queue its work to a
     * dedicated worker thread so that it doesn't interrupt other
     * important events.
     */
    kthread_init_worker(&priv->pp_event_worker);
    priv->pp_event_thread = kthread_run(kthread_worker_fn,
            &priv->pp_event_worker, "pp_event");
    if (IS_ERR(priv->pp_event_thread)) {
        dev_err(dev, "failed to create pp_event kthread\n");
        ret = PTR_ERR(priv->pp_event_thread);
        priv->pp_event_thread = NULL;
        return ret;
    }

    ret = sched_setscheduler(priv->pp_event_thread,
            SCHED_FIFO, &param);
    if (ret)
        pr_warn("pp_event thread priority update failed: %d\n",
                ret);

    return 0;
}

static struct msm_kms *_msm_drm_component_init_helper(
        struct msm_drm_private *priv,
        struct drm_device *ddev, struct device *dev,
        struct platform_device *pdev)
{
    int ret;
    struct msm_kms *kms;

    switch (get_mdp_ver(pdev)) {
    case KMS_MDP4:
        kms = mdp4_kms_init(ddev);
        break;
    case KMS_MDP5:
        kms = mdp5_kms_init(ddev);
        break;
    case KMS_SDE:
        kms = sde_kms_init(ddev);
        break;
    default:
        kms = ERR_PTR(-ENODEV);
        break;
    }

    if (IS_ERR_OR_NULL(kms)) {
        /*
         * NOTE: once we have GPU support, having no kms should not
         * be considered fatal.. ideally we would still support gpu
         * and (for example) use dmabuf/prime to share buffers with
         * imx drm driver on iMX5
         */
        dev_err(dev, "failed to load kms\n");
        return kms;
    }
    priv->kms = kms;

    /*
     * Since kms->funcs->hw_init(kms) might call
     * drm_object_property_set_value to initialize some custom
     * properties, we need to make sure mode_config.funcs is populated
     * beforehand to avoid dereferencing an unset value during the
     * drm_drv_uses_atomic_modeset check.
     */
    ddev->mode_config.funcs = &mode_config_funcs;

    ret = kms->funcs->hw_init(kms);
    if (ret) {
        dev_err(dev, "kms hw init failed: %d\n", ret);
        return ERR_PTR(ret);
    }

    return kms;
}

static int msm_drm_device_init(struct platform_device *pdev,
        struct drm_driver *drv)
{
    struct device *dev = &pdev->dev;
    struct drm_device *ddev;
    struct msm_drm_private *priv;
    int i, ret;

    ddev = drm_dev_alloc(drv, dev);
    if (IS_ERR(ddev)) {
        dev_err(dev, "failed to allocate drm_device\n");
        return PTR_ERR(ddev);
    }

    drm_mode_config_init(ddev);
    platform_set_drvdata(pdev, ddev);

    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    if (!priv) {
        ret = -ENOMEM;
        goto priv_alloc_fail;
    }

    ddev->dev_private = priv;
    priv->dev = ddev;

    ret = sde_power_resource_init(pdev, &priv->phandle);
    if (ret) {
        pr_err("sde power resource init failed\n");
        goto power_init_fail;
    }

    ret = sde_dbg_init(&pdev->dev);
    if (ret) {
        dev_err(dev, "failed to init sde dbg: %d\n", ret);
        goto dbg_init_fail;
    }

    pm_runtime_enable(dev);

    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
        dev_err(dev, "resource enable failed: %d\n", ret);
        goto pm_runtime_error;
    }

    for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
        sde_power_data_bus_set_quota(&priv->phandle, i,
            SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA,
            SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA);

    return ret;

pm_runtime_error:
    sde_dbg_destroy();
dbg_init_fail:
    sde_power_resource_deinit(pdev, &priv->phandle);
power_init_fail:
priv_alloc_fail:
    drm_dev_put(ddev);
    kfree(priv);
    return ret;
}

static int msm_drm_component_init(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct drm_device *ddev = platform_get_drvdata(pdev);
    struct msm_drm_private *priv = ddev->dev_private;
    struct msm_kms *kms = NULL;
    int ret;
    struct sched_param param = { 0 };
    struct drm_crtc *crtc;

    ret = msm_mdss_init(ddev);
    if (ret)
        goto mdss_init_fail;

    priv->wq = alloc_ordered_workqueue("msm_drm", 0);
    init_waitqueue_head(&priv->pending_crtcs_event);

    INIT_LIST_HEAD(&priv->client_event_list);
    INIT_LIST_HEAD(&priv->inactive_list);

    /* Bind all our sub-components: */
    ret = msm_component_bind_all(dev, ddev);
    if (ret)
        goto bind_fail;

    ret = msm_init_vram(ddev);
    if (ret)
        goto fail;

    ddev->mode_config.funcs = &mode_config_funcs;
    ddev->mode_config.helper_private = &mode_config_helper_funcs;

    kms = _msm_drm_component_init_helper(priv, ddev, dev, pdev);
    if (IS_ERR_OR_NULL(kms)) {
        dev_err(dev, "msm_drm_component_init_helper failed\n");
        goto fail;
    }

    ret = msm_drm_display_thread_create(param, priv, ddev, dev);
    if (ret) {
        dev_err(dev, "msm_drm_display_thread_create failed\n");
        goto fail;
    }

    ret = drm_vblank_init(ddev, priv->num_crtcs);
    if (ret < 0) {
        dev_err(dev, "failed to initialize vblank\n");
        goto fail;
    }

    drm_for_each_crtc(crtc, ddev)
        drm_crtc_vblank_reset(crtc);

    if (kms) {
        pm_runtime_get_sync(dev);
        ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
        pm_runtime_put_sync(dev);
        if (ret < 0) {
            dev_err(dev, "failed to install IRQ handler\n");
            goto fail;
        }
    }

    ret = drm_dev_register(ddev, 0);
    if (ret)
        goto fail;
    priv->registered = true;

    drm_mode_config_reset(ddev);

    if (kms && kms->funcs && kms->funcs->cont_splash_config) {
        ret = kms->funcs->cont_splash_config(kms);
        if (ret) {
            dev_err(dev, "kms cont_splash config failed.\n");
            goto fail;
        }
    }

#ifdef CONFIG_DRM_FBDEV_EMULATION
    if (fbdev)
        priv->fbdev = msm_fbdev_init(ddev);
#endif

    /* create drm client only when fbdev is not supported */
    if (!priv->fbdev) {
        ret = drm_client_init(ddev, &kms->client, "kms_client", NULL);
        if (ret) {
            DRM_ERROR("failed to init kms_client: %d\n", ret);
            kms->client.dev = NULL;
            goto fail;
        }

        drm_client_register(&kms->client);
    }

    ret = sde_dbg_debugfs_register(dev);
    if (ret) {
        dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
        goto fail;
    }

    /* perform subdriver post initialization */
    if (kms && kms->funcs && kms->funcs->postinit) {
        ret = kms->funcs->postinit(kms);
        if (ret) {
            pr_err("kms post init failed: %d\n", ret);
            goto fail;
        }
    }

    drm_kms_helper_poll_init(ddev);

    return 0;

fail:
    msm_drm_uninit(dev);
    return ret;
bind_fail:
    msm_mdss_destroy(ddev);
mdss_init_fail:
    sde_dbg_destroy();
    sde_power_resource_deinit(pdev, &priv->phandle);
    drm_dev_put(ddev);
    kfree(priv);
    return ret;
}

/*
 * DRM operations:
 */
static int context_init(struct drm_device *dev, struct drm_file *file)
{
    struct msm_file_private *ctx;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    mutex_init(&ctx->power_lock);

    file->driver_priv = ctx;

    if (dev && dev->dev_private) {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms;

        kms = priv->kms;
        if (kms && kms->funcs && kms->funcs->postopen)
            kms->funcs->postopen(kms, file);
    }

    return 0;
}

static int msm_open(struct drm_device *dev, struct drm_file *file)
{
    return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
    kfree(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_file_private *ctx = file->driver_priv;
    struct msm_kms *kms = priv->kms;

    if (!kms)
        return;

    if (kms->funcs && kms->funcs->postclose)
        kms->funcs->postclose(kms, file);

    mutex_lock(&dev->struct_mutex);
    if (ctx == priv->lastctx)
        priv->lastctx = NULL;
    mutex_unlock(&dev->struct_mutex);

    mutex_lock(&ctx->power_lock);
    if (ctx->enable_refcnt) {
        SDE_EVT32(ctx->enable_refcnt);
        pm_runtime_put_sync(dev->dev);
    }
    mutex_unlock(&ctx->power_lock);

    context_close(ctx);
}

static void msm_lastclose(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;
    int i, rc;

    if (!kms)
        return;

    /* check for splash status before triggering cleanup:
     * if we end up here with splash status ON, i.e. before the first
     * commit, then ignore the lastclose call
     */
    if (kms->funcs && kms->funcs->check_for_splash
            && kms->funcs->check_for_splash(kms))
        return;

    /*
     * clean up vblank disable immediately as this is the last close.
     */
    for (i = 0; i < dev->num_crtcs; i++) {
        struct drm_vblank_crtc *vblank = &dev->vblank[i];
        struct timer_list *disable_timer = &vblank->disable_timer;

        if (del_timer_sync(disable_timer))
            disable_timer->function(disable_timer);
    }

    /* wait for pending vblank requests to be executed by worker thread */
    flush_workqueue(priv->wq);

    /* wait for any pending crtcs to finish before lastclose commit */
    msm_wait_event_timeout(priv->pending_crtcs_event, !priv->pending_crtcs,
            LASTCLOSE_TIMEOUT_MS, rc);
    if (!rc)
        DRM_INFO("wait for crtc mask 0x%x failed, commit anyway...\n",
                priv->pending_crtcs);

    if (priv->fbdev) {
        rc = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
        if (rc)
            DRM_ERROR("restore FBDEV mode failed: %d\n", rc);
    } else if (kms && kms->client.dev) {
        rc = drm_client_modeset_commit_force(&kms->client);
        if (rc)
            DRM_ERROR("client modeset commit failed: %d\n", rc);
    }

    /* wait again, before the kms driver does its lastclose commit */
    msm_wait_event_timeout(priv->pending_crtcs_event, !priv->pending_crtcs,
            LASTCLOSE_TIMEOUT_MS, rc);
    if (!rc)
        DRM_INFO("wait for crtc mask 0x%x failed, commit anyway...\n",
                priv->pending_crtcs);

    if (kms->funcs && kms->funcs->lastclose)
        kms->funcs->lastclose(kms);
}

static irqreturn_t msm_irq(int irq, void *arg)
{
    struct drm_device *dev = arg;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    BUG_ON(!kms);

    return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    BUG_ON(!kms);
    kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    BUG_ON(!kms);

    if (kms->funcs->irq_postinstall)
        return kms->funcs->irq_postinstall(kms);

    return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    BUG_ON(!kms);
    kms->funcs->irq_uninstall(kms);
}

static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    if (!kms)
        return -ENXIO;

    DBG("dev=%pK, crtc=%u", dev, pipe);
    return vblank_ctrl_queue_work(priv, pipe, true);
}

static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;

    if (!kms)
        return;

    DBG("dev=%pK, crtc=%u", dev, pipe);
    vblank_ctrl_queue_work(priv, pipe, false);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct drm_msm_gem_new *args = data;

    if (args->flags & ~MSM_BO_FLAGS) {
        DRM_ERROR("invalid flags: %08x\n", args->flags);
        return -EINVAL;
    }

    return msm_gem_new_handle(dev, file, args->size,
            args->flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
    return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

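/*
 * Note: drm_msm_timespec is treated as an absolute CLOCK_MONOTONIC
 * deadline rather than a relative interval, which is why to_ktime()
 * converts it straight to a ktime_t that is later compared against
 * ktime_get().
 */
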
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct drm_msm_gem_cpu_prep *args = data;
    struct drm_gem_object *obj;
    ktime_t timeout = to_ktime(args->timeout);
    int ret;

    if (args->op & ~MSM_PREP_FLAGS) {
        DRM_ERROR("invalid op: %08x\n", args->op);
        return -EINVAL;
    }

    obj = drm_gem_object_lookup(file, args->handle);
    if (!obj)
        return -ENOENT;

    ret = msm_gem_cpu_prep(obj, args->op, &timeout);

    drm_gem_object_put_unlocked(obj);

    return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct drm_msm_gem_cpu_fini *args = data;
    struct drm_gem_object *obj;
    int ret;

    obj = drm_gem_object_lookup(file, args->handle);
    if (!obj)
        return -ENOENT;

    ret = msm_gem_cpu_fini(obj);

    drm_gem_object_put_unlocked(obj);

    return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct drm_msm_gem_madvise *args = data;
    struct drm_gem_object *obj;
    int ret;

    switch (args->madv) {
    case MSM_MADV_DONTNEED:
    case MSM_MADV_WILLNEED:
        break;
    default:
        return -EINVAL;
    }

    ret = mutex_lock_interruptible(&dev->struct_mutex);
    if (ret)
        return ret;

    obj = drm_gem_object_lookup(file, args->handle);
    if (!obj) {
        ret = -ENOENT;
        goto unlock;
    }

    ret = msm_gem_madvise(obj, args->madv);
    if (ret >= 0) {
        args->retained = ret;
        ret = 0;
    }

    drm_gem_object_put(obj);

unlock:
    mutex_unlock(&dev->struct_mutex);
    return ret;
}

static int msm_drm_object_supports_event(struct drm_device *dev,
        struct drm_msm_event_req *req)
{
    int ret = -EINVAL;
    struct drm_mode_object *arg_obj;

    arg_obj = drm_mode_object_find(dev, NULL, req->object_id,
            req->object_type);
    if (!arg_obj)
        return -ENOENT;

    switch (arg_obj->type) {
    case DRM_MODE_OBJECT_CRTC:
    case DRM_MODE_OBJECT_CONNECTOR:
        ret = 0;
        break;
    default:
        ret = -EOPNOTSUPP;
        break;
    }

    drm_mode_object_put(arg_obj);

    return ret;
}

static int msm_register_event(struct drm_device *dev,
        struct drm_msm_event_req *req, struct drm_file *file, bool en)
{
    int ret = -EINVAL;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_kms *kms = priv->kms;
    struct drm_mode_object *arg_obj;

    arg_obj = drm_mode_object_find(dev, file, req->object_id,
            req->object_type);
    if (!arg_obj)
        return -ENOENT;

    ret = kms->funcs->register_events(kms, arg_obj, req->event, en);

    drm_mode_object_put(arg_obj);

    return ret;
}

static int msm_event_client_count(struct drm_device *dev,
        struct drm_msm_event_req *req_event, bool locked)
{
    struct msm_drm_private *priv = dev->dev_private;
    unsigned long flag = 0;
    struct msm_drm_event *node;
    int count = 0;

    if (!locked)
        spin_lock_irqsave(&dev->event_lock, flag);
    list_for_each_entry(node, &priv->client_event_list, base.link) {
        if (node->event.base.type == req_event->event &&
                node->event.info.object_id == req_event->object_id)
            count++;
    }
    if (!locked)
        spin_unlock_irqrestore(&dev->event_lock, flag);

    return count;
}

static int msm_ioctl_register_event(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct drm_msm_event_req *req_event = data;
    struct msm_drm_event *client, *node;
    unsigned long flag = 0;
    bool dup_request = false;
    int ret = 0, count = 0;

    ret = msm_drm_object_supports_event(dev, req_event);
    if (ret) {
        DRM_ERROR("unsupported event %x object %x object id %d\n",
            req_event->event, req_event->object_type,
            req_event->object_id);
        return ret;
    }

    spin_lock_irqsave(&dev->event_lock, flag);
    list_for_each_entry(node, &priv->client_event_list, base.link) {
        if (node->base.file_priv != file)
            continue;
        if (node->event.base.type == req_event->event &&
                node->event.info.object_id == req_event->object_id) {
            DRM_DEBUG("duplicate request for event %x obj id %d\n",
                node->event.base.type,
                node->event.info.object_id);
            dup_request = true;
            break;
        }
    }
    spin_unlock_irqrestore(&dev->event_lock, flag);

    if (dup_request)
        return -EALREADY;

    client = kzalloc(sizeof(*client), GFP_KERNEL);
    if (!client)
        return -ENOMEM;

    client->base.file_priv = file;
    client->base.event = &client->event.base;
    client->event.base.type = req_event->event;
    memcpy(&client->event.info, req_event, sizeof(client->event.info));

    /* Get the count of clients that have registered for this event.
     * The event needs to be enabled for the first client only; for
     * subsequent registrations, just add to the client list and return.
     */
    count = msm_event_client_count(dev, req_event, false);

    /* Add current client to list */
    spin_lock_irqsave(&dev->event_lock, flag);
    list_add_tail(&client->base.link, &priv->client_event_list);
    spin_unlock_irqrestore(&dev->event_lock, flag);

    if (count)
        return 0;

    ret = msm_register_event(dev, req_event, file, true);
    if (ret) {
        DRM_ERROR("failed to enable event %x object %x object id %d\n",
            req_event->event, req_event->object_type,
            req_event->object_id);
        spin_lock_irqsave(&dev->event_lock, flag);
        list_del(&client->base.link);
        spin_unlock_irqrestore(&dev->event_lock, flag);
        kfree(client);
    }

    return ret;
}

static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
        struct drm_file *file)
{
    struct msm_drm_private *priv = dev->dev_private;
    struct drm_msm_event_req *req_event = data;
    struct msm_drm_event *client = NULL, *node, *temp;
    unsigned long flag = 0;
    int count = 0;
    bool found = false;
    int ret = 0;

    ret = msm_drm_object_supports_event(dev, req_event);
    if (ret) {
        DRM_ERROR("unsupported event %x object %x object id %d\n",
            req_event->event, req_event->object_type,
            req_event->object_id);
        return ret;
    }

    spin_lock_irqsave(&dev->event_lock, flag);
    list_for_each_entry_safe(node, temp, &priv->client_event_list,
            base.link) {
        if (node->event.base.type == req_event->event &&
                node->event.info.object_id == req_event->object_id &&
                node->base.file_priv == file) {
            client = node;
            list_del(&client->base.link);
            found = true;
            kfree(client);
            break;
        }
    }
    spin_unlock_irqrestore(&dev->event_lock, flag);

    if (!found)
        return -ENOENT;

    count = msm_event_client_count(dev, req_event, false);
    if (!count)
        ret = msm_register_event(dev, req_event, file, false);

    return ret;
}

void msm_mode_object_event_notify(struct drm_mode_object *obj,
        struct drm_device *dev, struct drm_event *event, u8 *payload)
{
    struct msm_drm_private *priv = NULL;
    unsigned long flags;
    struct msm_drm_event *notify, *node;
    int len = 0, ret;

    if (!obj || !event || !event->length || !payload) {
        DRM_ERROR("err param obj %pK event %pK len %d payload %pK\n",
            obj, event, ((event) ? (event->length) : -1),
            payload);
        return;
    }

    priv = (dev) ? dev->dev_private : NULL;
    if (!dev || !priv) {
        DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
        return;
    }

    spin_lock_irqsave(&dev->event_lock, flags);
    list_for_each_entry(node, &priv->client_event_list, base.link) {
        if (node->event.base.type != event->type ||
                obj->id != node->event.info.object_id)
            continue;

        len = event->length + sizeof(struct msm_drm_event);
        if (node->base.file_priv->event_space < len) {
            DRM_ERROR("Insufficient space %d for event %x len %d\n",
                node->base.file_priv->event_space, event->type,
                len);
            continue;
        }

        notify = kzalloc(len, GFP_ATOMIC);
        if (!notify)
            continue;

        notify->base.file_priv = node->base.file_priv;
        notify->base.event = &notify->event.base;
        notify->event.base.type = node->event.base.type;
        notify->event.base.length = event->length +
            sizeof(struct drm_msm_event_resp);
        memcpy(&notify->event.info, &node->event.info,
            sizeof(notify->event.info));
        memcpy(notify->event.data, payload, event->length);

        ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
                &notify->base, &notify->event.base);
        if (ret) {
            kfree(notify);
            continue;
        }

        drm_send_event_locked(dev, &notify->base);
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);
}

static int msm_release(struct inode *inode, struct file *filp)
{
    struct drm_file *file_priv = filp->private_data;
    struct drm_minor *minor = file_priv->minor;
    struct drm_device *dev = minor->dev;
    struct msm_drm_private *priv = dev->dev_private;
    struct msm_drm_event *node, *temp, *tmp_node;
    u32 count;
    unsigned long flags;
    LIST_HEAD(tmp_head);

    spin_lock_irqsave(&dev->event_lock, flags);
    list_for_each_entry_safe(node, temp, &priv->client_event_list,
            base.link) {
        if (node->base.file_priv != file_priv)
            continue;
        list_del(&node->base.link);
        list_add_tail(&node->base.link, &tmp_head);
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);

    list_for_each_entry_safe(node, temp, &tmp_head,
            base.link) {
        list_del(&node->base.link);
        count = msm_event_client_count(dev, &node->event.info, false);

        list_for_each_entry(tmp_node, &tmp_head, base.link) {
            if (tmp_node->event.base.type ==
                    node->event.info.event &&
                    tmp_node->event.info.object_id ==
                    node->event.info.object_id)
                count++;
        }

        if (!count)
            msm_register_event(dev, &node->event.info, file_priv,
                    false);

        kfree(node);
    }

    return drm_release(inode, filp);
}

/**
 * msm_ioctl_rmfb2 - remove an FB from the configuration
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Remove the FB specified by the user.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
        struct drm_file *file_priv)
{
    struct drm_framebuffer *fb = NULL;
    struct drm_framebuffer *fbl = NULL;
    uint32_t *id = data;
    int found = 0;

    if (!drm_core_check_feature(dev, DRIVER_MODESET))
        return -EINVAL;

    fb = drm_framebuffer_lookup(dev, file_priv, *id);
    if (!fb)
        return -ENOENT;

    /* drop extra ref from traversing drm_framebuffer_lookup */
    drm_framebuffer_put(fb);

    mutex_lock(&file_priv->fbs_lock);
    list_for_each_entry(fbl, &file_priv->fbs, filp_head)
        if (fb == fbl)
            found = 1;
    if (!found) {
        mutex_unlock(&file_priv->fbs_lock);
        return -ENOENT;
    }

    list_del_init(&fb->filp_head);
    mutex_unlock(&file_priv->fbs_lock);

    drm_framebuffer_put(fb);

    return 0;
}
EXPORT_SYMBOL(msm_ioctl_rmfb2);

/**
 * msm_ioctl_power_ctrl - enable/disable power vote on MDSS HW
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 */
int msm_ioctl_power_ctrl(struct drm_device *dev, void *data,
        struct drm_file *file_priv)
{
    struct msm_file_private *ctx = file_priv->driver_priv;
    struct msm_drm_private *priv;
    struct drm_msm_power_ctrl *power_ctrl = data;
    bool vote_req = false;
    int old_cnt;
    int rc = 0;

    if (unlikely(!power_ctrl)) {
        DRM_ERROR("invalid ioctl data\n");
        return -EINVAL;
    }

    priv = dev->dev_private;

    mutex_lock(&ctx->power_lock);

    old_cnt = ctx->enable_refcnt;
    if (power_ctrl->enable) {
        if (!ctx->enable_refcnt)
            vote_req = true;
        ctx->enable_refcnt++;
    } else if (ctx->enable_refcnt) {
        ctx->enable_refcnt--;
        if (!ctx->enable_refcnt)
            vote_req = true;
    } else {
        pr_err("ignoring, unbalanced disable\n");
    }

    if (vote_req) {
        if (power_ctrl->enable)
            rc = pm_runtime_get_sync(dev->dev);
        else
            pm_runtime_put_sync(dev->dev);

        if (rc < 0)
            ctx->enable_refcnt = old_cnt;
        else
            rc = 0;
    }

    pr_debug("pid %d enable %d, refcnt %d, vote_req %d\n",
        current->pid, power_ctrl->enable, ctx->enable_refcnt,
        vote_req);
    SDE_EVT32(current->pid, power_ctrl->enable, ctx->enable_refcnt,
        vote_req);

    mutex_unlock(&ctx->power_lock);
    return rc;
}

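/*
 * The per-file enable_refcnt above makes the power ctrl ioctl
 * idempotent per client: only the 0->1 enable and the final disable
 * translate into pm_runtime votes, and a vote still held at file close
 * is dropped in msm_postclose(). A sketch of the userspace sequence
 * (illustrative only; see the uapi header for the exact ioctl macro):
 *
 *    struct drm_msm_power_ctrl ctrl = { .enable = 1 };
 *
 *    drmIoctl(fd, DRM_IOCTL_MSM_POWER_CTRL, &ctrl);  // vote taken
 *    drmIoctl(fd, DRM_IOCTL_MSM_POWER_CTRL, &ctrl);  // refcnt only
 *    ctrl.enable = 0;
 *    drmIoctl(fd, DRM_IOCTL_MSM_POWER_CTRL, &ctrl);  // refcnt 2 -> 1
 *    drmIoctl(fd, DRM_IOCTL_MSM_POWER_CTRL, &ctrl);  // vote released
 */
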
static const struct drm_ioctl_desc msm_ioctls[] = {
    DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
    DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
    DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event,
            DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
            DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2, DRM_UNLOCKED),
    DRM_IOCTL_DEF_DRV(MSM_POWER_CTRL, msm_ioctl_power_ctrl,
            DRM_RENDER_ALLOW),
};


static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = msm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = msm_gem_mmap,
};

static struct drm_driver msm_driver = {
	.driver_features = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET,
	.open = msm_open,
	.postclose = msm_postclose,
	.lastclose = msm_lastclose,
	.irq_handler = msm_irq,
	.irq_preinstall = msm_irq_preinstall,
	.irq_postinstall = msm_irq_postinstall,
	.irq_uninstall = msm_irq_uninstall,
	.enable_vblank = msm_enable_vblank,
	.disable_vblank = msm_disable_vblank,
	.gem_free_object = msm_gem_free_object,
	.gem_vm_ops = &vm_ops,
	.dumb_create = msm_gem_dumb_create,
	.dumb_map_offset = msm_gem_dumb_map_offset,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = msm_gem_prime_import,
	.gem_prime_pin = msm_gem_prime_pin,
	.gem_prime_unpin = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap = msm_gem_prime_vmap,
	.gem_prime_vunmap = msm_gem_prime_vunmap,
	.gem_prime_mmap = msm_gem_prime_mmap,
	.ioctls = msm_ioctls,
	.num_ioctls = ARRAY_SIZE(msm_ioctls),
	.fops = &fops,
	.name = "msm_drm",
	.desc = "MSM Snapdragon DRM",
	.date = "20130625",
	.major = MSM_VERSION_MAJOR,
	.minor = MSM_VERSION_MINOR,
	.patchlevel = MSM_VERSION_PATCHLEVEL,
};

#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev->dev_private)
		return -EINVAL;

	priv = ddev->dev_private;
	kms = priv->kms;

	if (kms && kms->funcs && kms->funcs->pm_suspend)
		return kms->funcs->pm_suspend(dev);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev->dev_private)
		return -EINVAL;

	priv = ddev->dev_private;
	kms = priv->kms;

	if (kms && kms->funcs && kms->funcs->pm_resume)
		return kms->funcs->pm_resume(dev);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif

#ifdef CONFIG_PM
static int msm_runtime_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;

	DBG("");

	if (priv->mdss)
		msm_mdss_disable(priv->mdss);
	else
		sde_power_resource_enable(&priv->phandle, false);

	return 0;
}

static int msm_runtime_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	int ret;

	DBG("");

	if (priv->mdss)
		ret = msm_mdss_enable(priv->mdss);
	else
		ret = sde_power_resource_enable(&priv->phandle, true);

	return ret;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
	SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
};
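
/*
 * Note: msm_runtime_suspend()/msm_runtime_resume() are entered through
 * the runtime PM core, e.g. via the pm_runtime_get_sync() and
 * pm_runtime_put_sync() calls in msm_ioctl_power_ctrl() when a file's
 * vote count crosses zero.
 */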

/*
 * Componentized driver support:
 */

/*
 * NOTE: duplicates the same pattern as exynos, imx (and probably every
 * other componentized driver), so there is probably room for some
 * common helpers.
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *mdp_dev,
		struct component_match **matchptr)
{
	struct device_node *np = mdp_dev->of_node;
	struct device_node *ep_node;
	struct device *master_dev;

	/*
	 * on MDP4 based platforms, the MDP platform device is the component
	 * master that adds other display interface components to itself.
	 *
	 * on MDP5 based platforms, the MDSS platform device is the component
	 * master that adds MDP5 and other display interface components to
	 * itself.
	 */
	if (of_device_is_compatible(np, "qcom,mdp4"))
		master_dev = mdp_dev;
	else
		master_dev = mdp_dev->parent;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			dev_err(mdp_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
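
/*
 * For reference, a hypothetical DT fragment (node and label names are
 * illustrative, not from any real board file) that the endpoint walk
 * above would pick up, adding the DSI controller as a component:
 *
 *	mdp {
 *		ports {
 *			port@0 {
 *				mdp_out: endpoint {
 *					remote-endpoint = <&dsi_in>;
 *				};
 *			};
 *		};
 *	};
 *
 * of_graph_get_remote_port_parent() resolves <&dsi_in> to its parent
 * controller node, which is what lands in the match list.
 */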

/* matches child devices whose name begins with "mdp" */
static int compare_name_mdp(struct device *dev, void *data)
{
	return (strnstr(dev_name(dev), "mdp", strlen("mdp")) != NULL);
}

static int add_display_components(struct device *dev,
		struct component_match **matchptr)
{
	struct device *mdp_dev = NULL;
	struct device_node *node;
	int ret;

	if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) {
		struct device_node *np = dev->of_node;
		unsigned int i;

		for (i = 0; ; i++) {
			node = of_parse_phandle(np, "connectors", i);
			if (!node)
				break;

			component_match_add(dev, matchptr, compare_of, node);
		}

		return 0;
	}

	/*
	 * MDP5 based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5 node, and then add
	 * the interfaces to our components list.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret) {
			dev_err(dev, "failed to populate children devices\n");
			return ret;
		}

		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
		if (!mdp_dev) {
			dev_err(dev, "failed to find MDSS MDP node\n");
			of_platform_depopulate(dev);
			return -ENODEV;
		}

		put_device(mdp_dev);

		/* add the MDP component itself */
		component_match_add(dev, matchptr, compare_of,
				    mdp_dev->of_node);
	} else {
		/* MDP4 */
		mdp_dev = dev;
	}

	ret = add_components_mdp(mdp_dev, matchptr);
	if (ret)
		of_platform_depopulate(dev);

	return ret;
}
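
/*
 * Illustrative only: on "qcom,sde-kms" platforms the components come
 * from a flat "connectors" phandle list rather than the graph walk,
 * e.g. (labels hypothetical):
 *
 *	connectors = <&sde_rscc &sde_wb &dsi0 &dp0>;
 *
 * Each phandle is matched by compare_of() against a child device's
 * of_node when the component master binds.
 */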

struct msm_gem_address_space *
msm_gem_smmu_address_space_get(struct drm_device *dev,
		unsigned int domain)
{
	struct msm_drm_private *priv = NULL;
	struct msm_kms *kms;
	const struct msm_kms_funcs *funcs;
	struct msm_gem_address_space *aspace;

	if (!iommu_present(&platform_bus_type))
		return ERR_PTR(-ENODEV);

	if (!dev || !dev->dev_private)
		return ERR_PTR(-EINVAL);

	priv = dev->dev_private;
	kms = priv->kms;
	if (!kms)
		return ERR_PTR(-EINVAL);

	funcs = kms->funcs;
	if (!funcs || !funcs->get_address_space)
		return ERR_PTR(-EINVAL);

	aspace = funcs->get_address_space(priv->kms, domain);

	return aspace ? aspace : ERR_PTR(-EINVAL);
}
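
/*
 * Sketch of a typical caller (hedged: the domain identifier below,
 * MSM_SMMU_DOMAIN_UNSECURE, is whatever constant the KMS backend's
 * funcs->get_address_space() implementation expects; substitute the
 * real one). The function never returns NULL, only ERR_PTR on failure:
 *
 *	struct msm_gem_address_space *aspace;
 *
 *	aspace = msm_gem_smmu_address_space_get(ddev, MSM_SMMU_DOMAIN_UNSECURE);
 *	if (IS_ERR(aspace))
 *		return PTR_ERR(aspace);
 */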

int msm_get_mixer_count(struct msm_drm_private *priv,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct msm_kms *kms;
	const struct msm_kms_funcs *funcs;

	if (!priv) {
		DRM_ERROR("invalid drm private struct\n");
		return -EINVAL;
	}

	kms = priv->kms;
	if (!kms) {
		DRM_ERROR("invalid msm kms struct\n");
		return -EINVAL;
	}

	funcs = kms->funcs;
	if (!funcs || !funcs->get_mixer_count) {
		DRM_ERROR("invalid function pointers\n");
		return -EINVAL;
	}

	return funcs->get_mixer_count(priv->kms, mode, res, num_lm);
}

int msm_get_dsc_count(struct msm_drm_private *priv,
		u32 hdisplay, u32 *num_dsc)
{
	struct msm_kms *kms;
	const struct msm_kms_funcs *funcs;

	if (!priv) {
		DRM_ERROR("invalid drm private struct\n");
		return -EINVAL;
	}

	kms = priv->kms;
	if (!kms) {
		DRM_ERROR("invalid msm kms struct\n");
		return -EINVAL;
	}

	funcs = kms->funcs;
	if (!funcs || !funcs->get_dsc_count) {
		DRM_ERROR("invalid function pointers\n");
		return -EINVAL;
	}

	return funcs->get_dsc_count(priv->kms, hdisplay, num_dsc);
}
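
/*
 * Both helpers above follow the same guarded-dispatch shape; a minimal
 * caller sketch (variable names illustrative):
 *
 *	u32 num_lm = 0, num_dsc = 0;
 *
 *	if (msm_get_mixer_count(priv, mode, res, &num_lm) ||
 *	    msm_get_dsc_count(priv, mode->hdisplay, &num_dsc))
 *		return -EINVAL;
 */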

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_component_init(dev);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
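
/*
 * On "qcom,sde-kms" platforms, binding must not proceed until the RSCC
 * has probed: scan the "connectors" phandles and defer probe while any
 * enabled, populated qcom,sde_rscc node still has no driver data.
 */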
static int msm_drm_component_dependency_check(struct device *dev)
{
	struct device_node *node;
	struct device_node *np = dev->of_node;
	unsigned int i;

	if (!of_device_is_compatible(dev->of_node, "qcom,sde-kms"))
		return 0;

	for (i = 0; ; i++) {
		node = of_parse_phandle(np, "connectors", i);
		if (!node)
			break;

		if (of_node_name_eq(node, "qcom,sde_rscc") &&
		    of_device_is_available(node) &&
		    of_node_check_flag(node, OF_POPULATED)) {
			struct platform_device *pdev =
					of_find_device_by_node(node);
			if (!platform_get_drvdata(pdev)) {
				dev_err(dev,
					"qcom,sde_rscc not probed yet\n");
				return -EPROBE_DEFER;
			} else {
				return 0;
			}
		}
	}

	return 0;
}

/*
 * Platform driver:
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
	int ret;
	struct component_match *match = NULL;

	ret = msm_drm_component_dependency_check(&pdev->dev);
	if (ret)
		return ret;

	ret = msm_drm_device_init(pdev, &msm_driver);
	if (ret)
		return ret;

	ret = add_display_components(&pdev->dev, &match);
	if (ret)
		return ret;

	if (!match)
		return -ENODEV;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}

static int msm_pdev_remove(struct platform_device *pdev)
{
	/*
	 * component_master_del() unbinds the master, which in turn runs
	 * msm_drm_unbind(); no explicit unbind or second del is needed.
	 */
	component_master_del(&pdev->dev, &msm_drm_ops);
	of_platform_depopulate(&pdev->dev);

	return 0;
}

static void msm_pdev_shutdown(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);
	struct msm_drm_private *priv = NULL;

	if (!ddev) {
		DRM_ERROR("invalid drm device node\n");
		return;
	}

	priv = ddev->dev_private;
	if (!priv) {
		DRM_ERROR("invalid msm drm private node\n");
		return;
	}

	msm_lastclose(ddev);

	/* set this after lastclose to allow kickoff from lastclose */
	priv->shutdown_in_progress = true;
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
	{ .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE },
	{},
};
MODULE_DEVICE_TABLE(of, dt_match);
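
/*
 * Illustrative only: a DT node such as (name/address hypothetical)
 *
 *	mdss@ae00000 {
 *		compatible = "qcom,sde-kms";
 *	};
 *
 * matches the table above and probes this driver with KMS_SDE as the
 * match data.
 */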

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.shutdown = msm_pdev_shutdown,
	.driver = {
		.name = "msm_drm",
		.of_match_table = dt_match,
		.pm = &msm_pm_ops,
		.suppress_bind_attrs = true,
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	sde_rsc_rpmh_register();
	sde_rsc_register();
	dsi_display_register();
	msm_hdcp_register();
	dp_display_register();
	msm_smmu_driver_init();
	sde_rotator_register();
	sde_rotator_smmu_driver_register();
	msm_dsi_register();
	msm_edp_register();
	msm_hdmi_register();
	sde_wb_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	sde_wb_unregister();
	msm_hdmi_unregister();
	msm_edp_unregister();
	msm_dsi_unregister();
	sde_rotator_smmu_driver_unregister();
	sde_rotator_unregister();
	msm_smmu_driver_cleanup();
	msm_hdcp_unregister();
	dp_display_unregister();
	dsi_display_unregister();
	sde_rsc_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");