  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  4. * Copyright (C) 2013 Red Hat
  5. * Author: Rob Clark <[email protected]>
  6. */
  7. #ifndef __MSM_DRV_H__
  8. #define __MSM_DRV_H__
  9. #include <linux/kernel.h>
  10. #include <linux/clk.h>
  11. #include <linux/cpufreq.h>
  12. #include <linux/module.h>
  13. #include <linux/component.h>
  14. #include <linux/platform_device.h>
  15. #include <linux/pm.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/slab.h>
  18. #include <linux/list.h>
  19. #include <linux/iommu.h>
  20. #include <linux/types.h>
  21. #include <linux/of_graph.h>
  22. #include <linux/of_device.h>
  23. #include <linux/sizes.h>
  24. #include <linux/kthread.h>
  25. #include <drm/drm_atomic.h>
  26. #include <drm/drm_atomic_helper.h>
  27. #include <drm/drm_probe_helper.h>
  28. #include <drm/drm_fb_helper.h>
  29. #include <drm/display/drm_dsc.h>
  30. #include <drm/msm_drm.h>
  31. #include <drm/drm_gem.h>
#ifdef CONFIG_FAULT_INJECTION
extern struct fault_attr fail_gem_alloc;
extern struct fault_attr fail_gem_iova;
#else
/* Stub so callers of should_fail() need no #ifdefs when fault injection is off */
# define should_fail(attr, size) 0
#endif
  38. struct msm_kms;
  39. struct msm_gpu;
  40. struct msm_mmu;
  41. struct msm_mdss;
  42. struct msm_rd_state;
  43. struct msm_perf_state;
  44. struct msm_gem_submit;
  45. struct msm_fence_context;
  46. struct msm_gem_address_space;
  47. struct msm_gem_vma;
  48. struct msm_disp_state;
  49. #define MAX_CRTCS 8
  50. #define MAX_BRIDGES 8
  51. #define FRAC_16_16(mult, div) (((mult) << 16) / (div))
/*
 * Index of each DisplayPort controller instance; MSM_DP_CONTROLLER_COUNT
 * also sizes the msm_drm_private::dp[] array.
 */
enum msm_dp_controller {
	MSM_DP_CONTROLLER_0,
	MSM_DP_CONTROLLER_1,
	MSM_DP_CONTROLLER_2,
	MSM_DP_CONTROLLER_COUNT,
};
  58. #define MSM_GPU_MAX_RINGS 4
  59. #define MAX_H_TILES_PER_DISPLAY 2
/**
 * enum msm_event_wait - type of HW events to wait for
 * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
 * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
 * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
 */
enum msm_event_wait {
	MSM_ENC_COMMIT_DONE = 0,
	MSM_ENC_TX_COMPLETE,
	MSM_ENC_VBLANK,
};
/**
 * struct msm_display_topology - defines a display topology pipeline
 * @num_lm:       number of layer mixers used
 * @num_enc:      number of compression encoder blocks used
 * @num_intf:     number of interfaces the panel is mounted on
 * @num_dspp:     number of dspp blocks used
 * @num_dsc:      number of Display Stream Compression (DSC) blocks used
 */
struct msm_display_topology {
	u32 num_lm;
	u32 num_enc;
	u32 num_intf;
	u32 num_dspp;
	u32 num_dsc;
};
/* Commit/Event thread specific structure */
struct msm_drm_thread {
	struct drm_device *dev;
	/* index of the CRTC this event thread services */
	unsigned int crtc_id;
	/* kthread worker the per-CRTC event work is queued on */
	struct kthread_worker *worker;
};
/* Driver-private state, hung off of drm_device::dev_private */
struct msm_drm_private {

	struct drm_device *dev;

	struct msm_kms *kms;
	/* KMS backend init hook, supplied by the platform driver at probe */
	int (*kms_init)(struct drm_device *dev);

	/* subordinate devices, if present: */
	struct platform_device *gpu_pdev;

	/* possibly this should be in the kms component, but it is
	 * shared by both mdp4 and mdp5..
	 */
	struct hdmi *hdmi;

	/* DSI is shared by mdp4 and mdp5 */
	struct msm_dsi *dsi[2];

	struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];

	/* when we have more than one 'msm_gpu' these need to be an array: */
	struct msm_gpu *gpu;

	/* gpu is only set on open(), but we need this info earlier */
	bool is_a2xx;
	bool has_cached_coherent;

	struct drm_fb_helper *fbdev;

	struct msm_rd_state *rd;       /* debugfs to dump all submits */
	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
	struct msm_perf_state *perf;

	/**
	 * List of all GEM objects (mainly for debugfs), protected by obj_lock
	 * (acquire before per GEM object lock)
	 */
	struct list_head objects;
	struct mutex obj_lock;

	/**
	 * lru:
	 *
	 * The various LRU's that a GEM object is in at various stages of
	 * it's lifetime.  Objects start out in the unbacked LRU.  When
	 * pinned (for scannout or permanently mapped GPU buffers, like
	 * ringbuffer, memptr, fw, etc) it moves to the pinned LRU.  When
	 * unpinned, it moves into willneed or dontneed LRU depending on
	 * madvise state.  When backing pages are evicted (willneed) or
	 * purged (dontneed) it moves back into the unbacked LRU.
	 *
	 * The dontneed LRU is considered by the shrinker for objects
	 * that are candidate for purging, and the willneed LRU is
	 * considered for objects that could be evicted.
	 */
	struct {
		/**
		 * unbacked:
		 *
		 * The LRU for GEM objects without backing pages allocated.
		 * This mostly exists so that objects are always is one
		 * LRU.
		 */
		struct drm_gem_lru unbacked;

		/**
		 * pinned:
		 *
		 * The LRU for pinned GEM objects
		 */
		struct drm_gem_lru pinned;

		/**
		 * willneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * WILLNEED state (ie. can be evicted)
		 */
		struct drm_gem_lru willneed;

		/**
		 * dontneed:
		 *
		 * The LRU for unpinned GEM objects which are in madvise
		 * DONTNEED state (ie. can be purged)
		 */
		struct drm_gem_lru dontneed;

		/**
		 * lock:
		 *
		 * Protects manipulation of all of the LRUs.
		 */
		struct mutex lock;
	} lru;

	struct workqueue_struct *wq;

	unsigned int num_crtcs;
	struct drm_crtc *crtcs[MAX_CRTCS];

	/* one event thread per CRTC */
	struct msm_drm_thread event_thread[MAX_CRTCS];

	unsigned int num_bridges;
	struct drm_bridge *bridges[MAX_BRIDGES];

	/* VRAM carveout, used when no IOMMU: */
	struct {
		unsigned long size;
		dma_addr_t paddr;
		/* NOTE: mm managed at the page level, size is in # of pages
		 * and position mm_node->start is in # of pages:
		 */
		struct drm_mm mm;
		spinlock_t lock; /* Protects drm_mm node allocation/removal */
	} vram;

	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/* atomic state saved across system suspend/resume */
	struct drm_atomic_state *pm_state;

	/**
	 * hangcheck_period: For hang detection, in ms
	 *
	 * Note that in practice, a submit/job will get at least two hangcheck
	 * periods, due to checking for progress being implemented as simply
	 * "have the CP position registers changed since last time?"
	 */
	unsigned int hangcheck_period;

	/**
	 * disable_err_irq:
	 *
	 * Disable handling of GPU hw error interrupts, to force fallback to
	 * sw hangcheck timer.  Written (via debugfs) by igt tests to test
	 * the sw hangcheck mechanism.
	 */
	bool disable_err_irq;
};
/* Driver-private framebuffer format; holds the DRM fourcc pixel format */
struct msm_format {
	uint32_t pixel_format;
};
  210. struct msm_pending_timer;
  211. int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
  212. struct msm_kms *kms, int crtc_idx);
  213. void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
  214. void msm_atomic_commit_tail(struct drm_atomic_state *state);
  215. struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
  216. void msm_atomic_state_clear(struct drm_atomic_state *state);
  217. void msm_atomic_state_free(struct drm_atomic_state *state);
  218. int msm_crtc_enable_vblank(struct drm_crtc *crtc);
  219. void msm_crtc_disable_vblank(struct drm_crtc *crtc);
  220. int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
  221. void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
  222. struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev);
  223. bool msm_use_mmu(struct drm_device *dev);
  224. int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
  225. struct drm_file *file);
  226. #ifdef CONFIG_DEBUG_FS
  227. unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
  228. #endif
  229. void msm_gem_shrinker_init(struct drm_device *dev);
  230. void msm_gem_shrinker_cleanup(struct drm_device *dev);
  231. int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
  232. struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
  233. int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
  234. void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
  235. struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
  236. struct dma_buf_attachment *attach, struct sg_table *sg);
  237. int msm_gem_prime_pin(struct drm_gem_object *obj);
  238. void msm_gem_prime_unpin(struct drm_gem_object *obj);
  239. int msm_framebuffer_prepare(struct drm_framebuffer *fb,
  240. struct msm_gem_address_space *aspace, bool needs_dirtyfb);
  241. void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
  242. struct msm_gem_address_space *aspace, bool needed_dirtyfb);
  243. uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
  244. struct msm_gem_address_space *aspace, int plane);
  245. struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
  246. const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
  247. struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
  248. struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
  249. struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
  250. int w, int h, int p, uint32_t format);
  251. struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
  252. void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
#else
/* Stubs so callers compile without #ifdefs when HDMI support is disabled */
static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
		struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void __init msm_hdmi_register(void) {}
static inline void __exit msm_hdmi_unregister(void) {}
#endif
struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
int dsi_dev_attach(struct platform_device *pdev);
void dsi_dev_detach(struct platform_device *pdev);
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
			 struct drm_encoder *encoder);
void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi);
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
/* Stubs so callers compile without #ifdefs when DSI support is disabled */
static inline void __init msm_dsi_register(void)
{
}
static inline void __exit msm_dsi_unregister(void)
{
}
static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
				       struct drm_device *dev,
				       struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi)
{
}
static inline bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
	return false;
}
static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
{
	return NULL;
}
#endif
#ifdef CONFIG_DRM_MSM_DP
int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
			struct drm_encoder *encoder);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
/* Stubs so callers compile without #ifdefs when DP support is disabled */
static inline int __init msm_dp_register(void)
{
	return -EINVAL;
}
static inline void __exit msm_dp_unregister(void)
{
}
static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
				      struct drm_device *dev,
				      struct drm_encoder *encoder)
{
	return -EINVAL;
}
static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
{
}
static inline void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display)
{
}
static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
				       struct drm_minor *minor)
{
}
static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
{
	return false;
}
#endif
/* Per-KMS-backend platform-driver (un)registration; no-ops when disabled */
#ifdef CONFIG_DRM_MSM_MDP4
void msm_mdp4_register(void);
void msm_mdp4_unregister(void);
#else
static inline void msm_mdp4_register(void) {}
static inline void msm_mdp4_unregister(void) {}
#endif
#ifdef CONFIG_DRM_MSM_MDP5
void msm_mdp_register(void);
void msm_mdp_unregister(void);
#else
static inline void msm_mdp_register(void) {}
static inline void msm_mdp_unregister(void) {}
#endif
#ifdef CONFIG_DRM_MSM_DPU
void msm_dpu_register(void);
void msm_dpu_unregister(void);
#else
static inline void msm_dpu_register(void) {}
static inline void msm_dpu_unregister(void) {}
#endif
#ifdef CONFIG_DRM_MSM_MDSS
void msm_mdss_register(void);
void msm_mdss_unregister(void);
#else
static inline void msm_mdss_register(void) {}
static inline void msm_mdss_unregister(void) {}
#endif
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
int msm_debugfs_late_init(struct drm_device *dev);
int msm_rd_debugfs_init(struct drm_minor *minor);
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
__printf(3, 4)
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
		const char *fmt, ...);
int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
/* Stubs so callers compile without #ifdefs when debugfs is disabled */
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
__printf(3, 4)
static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
			struct msm_gem_submit *submit,
			const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
  399. struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
  400. struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
  401. const char *name);
  402. void __iomem *msm_ioremap(struct platform_device *pdev, const char *name);
  403. void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
  404. phys_addr_t *size);
  405. void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name);
  406. struct icc_path *msm_icc_get(struct device *dev, const char *name);
  407. #define msm_writel(data, addr) writel((data), (addr))
  408. #define msm_readl(addr) readl((addr))
  409. static inline void msm_rmw(void __iomem *addr, u32 mask, u32 or)
  410. {
  411. u32 val = msm_readl(addr);
  412. val &= ~mask;
  413. msm_writel(val | or, addr);
  414. }
/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work:  the kthread work
 * @worker: the kthread worker the work will be scheduled on
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};
  427. void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
  428. ktime_t wakeup_time,
  429. enum hrtimer_mode mode);
  430. void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
  431. struct kthread_worker *worker,
  432. kthread_work_func_t fn,
  433. clockid_t clock_id,
  434. enum hrtimer_mode mode);
/* Driver debug logging; VERB() is compiled out but arguments stay type-checked */
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
  437. static inline int align_pitch(int width, int bpp)
  438. {
  439. int bytespp = (bpp + 7) / 8;
  440. /* adreno needs pitch aligned to 32 pixels: */
  441. return bytespp * ALIGN(width, 32);
  442. }
/* for the generated headers: */
/* These stubs BUG() if the mesa-generated helpers are ever reached in-kernel */
#define INVALID_IDX(idx) ({BUG(); 0;})
#define fui(x)                ({BUG(); 0;})
#define _mesa_float_to_half(x) ({BUG(); 0;})

/* Extract a register field using the generated NAME__MASK/NAME__SHIFT pairs */
#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)

/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
  450. static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
  451. {
  452. ktime_t now = ktime_get();
  453. s64 remaining_jiffies;
  454. if (ktime_compare(*timeout, now) < 0) {
  455. remaining_jiffies = 0;
  456. } else {
  457. ktime_t rem = ktime_sub(*timeout, now);
  458. remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
  459. }
  460. return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
  461. }
  462. /* Driver helpers */
  463. extern const struct component_master_ops msm_drm_ops;
  464. int msm_pm_prepare(struct device *dev);
  465. void msm_pm_complete(struct device *dev);
  466. int msm_drv_probe(struct device *dev,
  467. int (*kms_init)(struct drm_device *dev));
  468. void msm_drv_shutdown(struct platform_device *pdev);
  469. #endif /* __MSM_DRV_H__ */