Browse Source

Merge remote-tracking branch 'origin/display-kernel.lnx.5.15' into display-kernel.lnx.1.0

* origin/display-kernel.lnx.5.15:
  disp: msm: dp: fix aux state during individual plug out/in
  disp: msm: sde: Add scaler offset for de lpf
  disp: msm: dsi: Enable TPG functionality
  disp: msm: dsi: Avoid re-initializing PLL registers during dyn clk switch
  disp: msm: sde: reset wb output crop during cwb disable
  disp: msm: dp: destroy audio workqueue outside session_lock
  disp: msm: sde: disable hw-fencing for commit before vm transition
  disp: msm: dp: set DSC capabilities in mode only if panel supports DSC
  disp: msm: dp: update DP aux state with correct status
  disp: msm: dp: update debug message for mst conn id debug node
  disp: msm: sde: fix cwb output res with DS & demura tap point
  disp: msm: sde: fix UBWC stat error log format
  disp: msm: sde: avoid PM suspend/resume if display has splash enabled
  disp: msm: sde: add ctx_id to debug message in sde_fence_signal
  disp: msm: sde: correct the sde vm release sequence
  disp: msm: sde: add additional WB roi checks
  disp: msm: sde: add check for layer-mixer width
  disp: msm: sde: set connector lm_mask for dp display
  disp: msm: sde: fix cwb lm allocation failures in RM
  disp: msm: sde: proper allocation of dcwb for LMs
  disp: msm: sde: fix dcwb idx selection for pp_dither and CTL blocks
  disp: added environment variable for build.sh techpack display_tp
  disp: msm: sde: update atomic check for VM_ACQUIRE state
  disp: msm: sde: avoid null pointer dereference
  drm/msm: don't allocate pages from the MOVABLE zone
  disp: msm: sde: add wait on spec fences for hwfencing
  disp: msm: sde: add tx wait for WB display during modeset
  disp: msm: avoid cwb on esd recovery commit
  disp: msm: sde: update uidle ctl register only for master encoder
  disp: msm: sde: Fix data width calculation when widebus is enabled
  disp: msm: sde: update encoder wait event timeout condition
  disp: msm: sde: avoid clear_pending_flush on hw_ctl during power_on commit
  disp: msm: sde: update atomic check for VM_REQ_ACQUIRE state
  disp: msm: sde: force RC mask revalidation during mode switch
  disp: msm: dp: get DSC enable status from mode instead of panel
  disp: msm: sde: avoid ctl switch allocation in RM
  disp: msm: change log level from error to debug for smmu cb not found
  disp: msm: dsi: change hs timer control to fix timeout issue
  disp: msm: sde: wait for active region only on DSI panel
  disp: msm: sde: handle vsync wait status check during timeout
  disp: msm: dp: add debug logs to ipc logging
  disp: msm: sde: avoid demura layers validation against crtc w/h
  drm: msm: update lfc config for demura
  disp: msm: dsi: avoid DSI pll codes parsing in TVM
  disp: msm: sde: add out of bounds check for dnsc_blur & wb cache
  disp: add support to compile out display kernel module
  disp: msm: sde: add support for display emulation on RUMI.
  disp: msm: avoid crtc seamless check if active_changed is set
  disp: msm: sde: override tearcheck rd_ptr_val when qsync is enabled
  disp: msm: dsi: move warn to info if secondary panel is not assigned
  disp: msm: dsi: turn on the PLL before switching RCG parent during clk on
  drm: msm: allow opr_en in spr bypass mode
  disp: msm: dsi: add missing dsi ctrl mutex lock in host timing update
  disp: msm: sde: add missing validations for dnsc_blur
  disp: msm: sde: add support for LUTDMA VBIF clock split
  disp: msm: sde: log vblank timestamp in eventlogs
  disp: msm: send power_on event in dual display composer kill scenario
  disp: msm: dp: change display status log level
  disp: msm: sde: add support for hwfence profiling
  disp: msm: dp: update DSC resource book keeping for mst
  disp: msm: dsi: pass DRM_BRIDGE_ATTACH_NO_CONNECTOR during bridge attach
  disp: msm: sde: add hw fence support for prog line count
  disp: msm: sde: hw_fence update autorefresh disable sequence
  disp: msm: sde: disable hw_fence for cmd/vid mode switch
  disp: msm: sde: add fence ready in event log
  disp: msm: sde: adds mem mapping for hwfence ipcc reg
  disp: config: add hw fence configuration files for Kalama
  disp: msm: sde: add support for hw-fence feature
  disp: enable the msm_drm packing for auto builds
  disp: msm: remove parsing deep color modes in sde parser
  disp: msm: sde: add custom event to notify OPR, MISR value change
  disp: msm: sde: toggle LLCC SCID for consecutive LLCC write
  disp: msm: sde: add reg dma support for vig DE lpf
  disp: msm: sde: update vsync source as part of post modeset
  disp: msm: Address static analysis issues
  disp: msm: add augen3 configuration
  disp: msm: dsi: Don't clear status interrupts while error interrupts toggle
  disp: msm: dp: improve accuracy of mvid/nvid calculation
  disp: msm: sde: fix precise vsync feature check
  disp: msm: sde: add support for LLCC_DISP_1 SCID
  disp: msm: sde: convert system cache boolean to feature bit
  disp: msm: sde: log SCID during LLCC activation
  disp: msm: merge flag of register and dbgbus
  disp: msm: sde: enable vsync irq during sys cache read work
  disp: msm: sde: change ubwc revision
  disp: msm: dp: add ability to select pattern for tpg
  disp: config: enable HDCP config for kalama
  disp: msm: link HDCP sec-module as a dependency
  Revert "disp: msm: dp: avoid duplicate read of link status"
  disp: msm: dp: set drm device pointer in dp aux object
  disp: msm: dp: update pll params with latest HPG values
  disp: msm: sde: reset plane cache state on plane disable
  disp: msm: sde: use LLCC_DISP for static display usecase with cwb
  disp: msm: sde: enable LLCC_DISP_WB for kalama target
  disp: msm: avoid rotator code compilation
  disp: msm: sde: fix GEM object inactive list locking
  disp: msm: sde: handle SSPP system cache for multi-plane scenario
  disp: msm: dsi: parse panel ack disabled property for sim panels
  disp: msm: sde: add line insertion support for sspp
  disp: msm: add mmrm configs for Kalama
  disp: msm: link mmrm module as a dependency
  disp: msm: sde: fix sde_vbif_get_xin_status return value
  disp: msm: sde: consider max of actual and default prefill lines
  disp: msm: sde: disable autorefresh on encoder disable
  disp: msm: hdcp: set default topology as DOWN_REQUEST_TOPOLOGY
  disp: msm: dsi: Remove backlight operation during poms process
  disp: msm: sde: address static analysis issues
  disp: msm: sde: enable llcc in AOD mode
  disp: msm: remove unused code snippet
  disp: msm: sde: remove hardcoding of LLCC use case id
  disp: msm: add capability to dynamically update the transfer time
  disp: msm: sde: shorter idle-pc duration in doze mode
  disp: msm: sde: add the DE lpf flag setting
  disp: msm: sde: refactor _sde_encoder_phys_wb_update_cwb_flush function
  disp: msm: dsi: add MISR support for ctl 2.2 version
  disp: msm: sde: install default value for panel_mode property
  disp: msm: sde: remove WB output buffer pitch alignment check
  disp: config: conditional import of msm-ext-display symbols
  disp: msm: sde: avoid null pointer dereference
  disp: msm: optimize devcoredump read operation duration
  disp: msm: sde: enable tui flag in catalog for kalama
  disp: msm: sde: SID programming for new MDSS
  disp: msm: sde: update HFC layer checks
  disp: msm: sde: program master intf register for single intf
  disp: msm: dp: add pll params table for 4nm PHY pll settings
  disp: msm: sde: avoid null pointer dereference
  disp: msm: sde: refactor dsi_display_get_modes function
  disp: msm: dsi: mitigate errors on non-parsed regulator nodes
  disp: msm: sde: use INTF mdp_vsync timestamp only for video-mode
  disp: msm: sde: avoid slave encoder wait with ctl-done
  disp: msm: fix WD timer load value calculation
  drm: msm: add spr by pass support
  disp: msm: dp: set the rates for clocks provided by DP PLL
  update source and include paths for LE
  disp: msm: dp: calculate mvid and nvid dividers with in DP driver
  disp: msm: dp: PHY config update to align with kalama HPG
  disp: config: enable msm_ext_display config for kailua
  disp: config: enable dp compilation for kailua
  disp: msm: link msm-ext-disp module as a dependency
  disp: msm: dp: Add support for USB3 GDSC vote from displayport driver
  disp: msm: sde: add check to avoid multiple active CWB
  disp: msm: sde: fix the wd-timer-ctrl config for WD TE
  disp: msm: add support for INTF WD jitter
  disp: msm: display error log signature alignment
  disp: msm: sde: set NOAUTOEN for sde irq to match with power event
  disp: msm: sde: move sde power event call into kms post init
  disp: msm: sde: update alignment check for dest WB fb
  disp: msm: update sde rsc register offsets based on drv version
  disp: msm: avoid BW_INDICATION write if BW does not change
  disp: msm: sde: release splash memory using memblock_free
  disp: msm: sde: drop suspend state if commit is skipped
  disp: msm: sde: Enable demura tap point capability in cwb
  disp: msm: sde: avoid error during fal10_veto override enablement
  disp: msm: sde: update sde debugbus logging for vbif & dsi
  disp: msm: sde: add uidle fill level scaling
  disp: msm: update copyright description
  disp: msm: sde: configure dest_scaler op_mode for two independent displays
  disp: msm: dp: updated copyright set for 4nm target
  disp: msm: sde: add support for DS2 and DS3
  Revert "disp: msm: sde: consider max of actual and default prefill lines"
  disp: msm: sde: Reset backlight scale when HWC is stopped
  disp: config: correct the copyright markers
  disp: msm: sde: fix UBWC decoder version support for Kalama
  disp: msm: dp: avoid duplicate read of link status
  disp: msm: sde: fix dnsc_blur mux setting for cwb
  disp: msm: dsi: update vreg_ctrl settings for cape
  disp: msm: fail commit if drm_gem_obj was found attached to a sec CB
  disp: msm: dp: updated register values for 4nm target
  disp: msm: sde: avoid ALIGN check on sde_dbg_reg_register_dump_range
  disp: msm: dp: avoid dp sw reset on disconnect path
  disp: msm: dp: use link clk khz when initializing mst mgr
  disp: msm: dp: avoid duplicate read of link status
  disp: msm: dp: fix configuration of aux switches and HPD sources
  disp: msm: sde: Add a new major version of sixzone in Kalama for SB LUTDMA
  disp: msm: dp: init DP catalog for kalama
  disp: msm: dp: avoid return value check for certain debugfs functions
  disp: msm: dp: remove unused header declaration
  disp: msm: sde: update framedata event handling
  disp: msm: dsi: Add new phy compatible string for cape
  disp: msm: sde: add DE LPF blend support
  disp: msm: sde: Split PA sixzone lutdma implementation
  msm: drm: uapi: Add uapi support for sixzone saturation adjustment
  disp: msm: sde: software override for fal10 in cwb enable
  disp: msm: sde: Update LTM merge mode setting for kailua
  disp: msm: sde: Add support for LTM2/3 for kailua
  disp: msm: update cleanup during bind failure in msm_drm_component_init
  disp: msm: avoid using #ifdef for configurations
  disp: msm: sde: parametrize RC minimum region width
  disp: config: add kalama TUI configuration files
  disp: msm: dp: remove dead code of "qcom,dp-mst-sim"
  disp: msm: sde: update worst case time to execute one tcs vote for rsc
  disp: msm: avoid use macro as vendor module guideline
  disp: msm: sde: dump user input_fence info on spec fence timeout
  disp: msm: dsi: optimize the display error log print
  disp: msm: sde: add null pointer check for encoder current master
  disp: msm: dsi: enable DMA start window scheduling for broadcast commands
  disp: msm: add devcoredump support for sde_dbg
  disp: msm: sde/dsi: reduce display cyclomatic complexity
  disp: msm: sde: add debugfs for FAL1 and FAL10 config
  disp: msm: sde: remove unsupported NV16 and NV61 YUV format
  disp: msm: sde: avoid alignment checks for linear formats
  disp: msm: reset thread priority work on every new run
  disp: msm: sde: remove rgb/cursor pipe related code
  disp: msm: dsi: add API for handling PHY programming during 0p9 collapse
  disp: msm: dsi: add new PHY and PLL version files
  disp: msm: sde: send power on event for cont. splash
  disp: msm: use pm_runtime_resume_and_get instead of pm_runtime_get_sync
  disp: msm: sde: update cwb block offset for kalama target
  disp: msm: sde: add line-based QoS calculation support
  disp: msm: sde: add offline WB QoS support
  disp: msm: sde: update DT parsing for VBIF QoS remap levels
  disp: msm: sde: update danger/safe QoS LUTs for landscape panels
  disp: msm: sde: disable ot limit for cwb
  disp: msm: sde: allow CDM access for all WB blocks
  disp: msm: sde: always set CTL_x_UIDLE_ACTIVE register to "1"
  disp: msm: use vzalloc for large allocations
  disp: msm: sde: Add support to limit DSC size to 10k
  disp: msm: sde: add tx wait during DMS for sim panel
  disp: msm: dsi: add check for any queued DSI CMDs before clock force update
  disp: msm: sde: correct pp block allocation during dcwb dither programming
  disp: msm: sde: avoid setting of max vblank count
  disp: msm: sde: add cached lut flag in sde plane
  disp: msm: sde: avoid use after free in msm_lastclose
  disp: msm: sde: update TEAR_SYNC_WRCOUNT register before vsync counter
  disp: msm: dsi: Support uncompressed rgb101010 format
  disp: msm: sde: update idle_pc_enabled flag for all encoders
  display: driver: default post start if SBLUA DMA exist
  disp: msm: sde: flush esd work before disabling the encoder
  disp: msm: sde: allow qsync update along with modeset
  disp: msm: dp: avoid dp sw reset on disconnect path
  disp: msm: sde: consider max of actual and default prefill lines
  disp: msm: ensure vbif debugbus not in use is disabled
  disp: msm: sde: update cached encoder mask if required
  disp: msm: sde: while timing engine enabling poll for active region
  disp: msm: enable cache flag for dumb buffer
  disp: msm: sde: disable ot limit for cwb
  disp: msm: sde: avoid race condition at vm release
  disp: msm: dsi: set qsync min fps list length to zero
  disp: msm: sde: reset mixers in crtc when ctl datapath switches
  disp: msm: sde: update vm state atomic check for non-primary usecases
  disp: msm: sde: reset CTL_UIDLE_ACTIVE register only if uidle is disabled

Change-Id: I85622b3c2d491140558842b7640e918015d4edd4
Signed-off-by: Ashwin Pillai <[email protected]>
Ashwin Pillai 2 years ago
parent
commit
3e23c1fccc
100 changed files with 7562 additions and 1850 deletions
  1. 11 2
      Android.mk
  2. 2 2
      Makefile.am
  3. 18 0
      config/augen3disp.conf
  4. 22 0
      config/augen3dispconf.h
  5. 6 0
      config/gki_kalamadisp.conf
  6. 7 0
      config/gki_kalamadispconf.h
  7. 10 0
      config/gki_kalamadisptui.conf
  8. 17 0
      config/gki_kalamadisptuiconf.h
  9. 1 1
      config/gki_parrotdispconf.h
  10. 7 1
      display_driver_board.mk
  11. 12 1
      display_driver_product.mk
  12. 5 2
      hdcp/msm_hdcp.c
  13. 34 1
      include/uapi/display/drm/msm_drm_pp.h
  14. 16 1
      include/uapi/display/drm/sde_drm.h
  15. 2 0
      include/uapi/display/media/mmm_color_fmt.h
  16. 23 3
      msm/Android.mk
  17. 20 6
      msm/Kbuild
  18. 76 31
      msm/dp/dp_aux.c
  19. 3 1
      msm/dp/dp_aux.h
  20. 3 2
      msm/dp/dp_aux_bridge.c
  21. 5 4
      msm/dp/dp_aux_bridge.h
  22. 14 15
      msm/dp/dp_catalog.c
  23. 2 1
      msm/dp/dp_catalog.h
  24. 39 54
      msm/dp/dp_catalog_v420.c
  25. 14 4
      msm/dp/dp_ctrl.c
  26. 22 87
      msm/dp/dp_debug.c
  27. 41 9
      msm/dp/dp_debug.h
  28. 85 55
      msm/dp/dp_display.c
  29. 4 0
      msm/dp/dp_display.h
  30. 6 0
      msm/dp/dp_drm.c
  31. 10 8
      msm/dp/dp_gpio_hpd.c
  32. 29 30
      msm/dp/dp_hpd.c
  33. 12 9
      msm/dp/dp_lphw_hpd.c
  34. 36 32
      msm/dp/dp_mst_drm.c
  35. 1 62
      msm/dp/dp_mst_sim.c
  36. 7 4
      msm/dp/dp_mst_sim_helper.c
  37. 19 17
      msm/dp/dp_panel.c
  38. 3 2
      msm/dp/dp_panel.h
  39. 1 24
      msm/dp/dp_parser.c
  40. 3 3
      msm/dp/dp_parser.h
  41. 5 0
      msm/dp/dp_pll.c
  42. 33 12
      msm/dp/dp_pll.h
  43. 98 122
      msm/dp/dp_pll_4nm.c
  44. 75 105
      msm/dp/dp_pll_5nm.c
  45. 30 22
      msm/dp/dp_power.c
  46. 3 0
      msm/dp/dp_power.h
  47. 2 1
      msm/dp/dp_usbpd.c
  48. 43 1
      msm/dsi/dsi_catalog.c
  49. 39 1
      msm/dsi/dsi_catalog.h
  50. 53 23
      msm/dsi/dsi_ctrl.c
  51. 17 1
      msm/dsi/dsi_ctrl.h
  52. 31 1
      msm/dsi/dsi_ctrl_hw.h
  53. 36 0
      msm/dsi/dsi_ctrl_hw_2_2.c
  54. 40 10
      msm/dsi/dsi_ctrl_hw_cmn.c
  55. 17 0
      msm/dsi/dsi_defs.h
  56. 205 76
      msm/dsi/dsi_display.c
  57. 14 1
      msm/dsi/dsi_display.h
  58. 75 49
      msm/dsi/dsi_drm.c
  59. 102 14
      msm/dsi/dsi_panel.c
  60. 2 0
      msm/dsi/dsi_panel.h
  61. 2 1
      msm/dsi/dsi_parser.h
  62. 11 0
      msm/dsi/dsi_phy.c
  63. 5 0
      msm/dsi/dsi_phy_hw.h
  64. 16 3
      msm/dsi/dsi_phy_hw_v4_0.c
  65. 889 0
      msm/dsi/dsi_phy_hw_v5_0.c
  66. 3 1
      msm/dsi/dsi_phy_timing_calc.c
  67. 11 9
      msm/dsi/dsi_pll.c
  68. 4 0
      msm/dsi/dsi_pll.h
  69. 1487 0
      msm/dsi/dsi_pll_4nm.c
  70. 299 0
      msm/dsi/dsi_pll_4nm.h
  71. 3 1
      msm/dsi/dsi_pll_5nm.c
  72. 1 1
      msm/dsi/dsi_pwr.c
  73. 10 3
      msm/msm_atomic.c
  74. 3 2
      msm/msm_cooling_device.h
  75. 88 52
      msm/msm_drv.c
  76. 51 4
      msm/msm_drv.h
  77. 21 14
      msm/msm_fb.c
  78. 30 15
      msm/msm_gem.c
  79. 7 4
      msm/msm_kms.h
  80. 6 5
      msm/msm_smmu.c
  81. 46 11
      msm/sde/sde_color_processing.c
  82. 8 0
      msm/sde/sde_color_processing.h
  83. 139 27
      msm/sde/sde_connector.c
  84. 57 0
      msm/sde/sde_connector.h
  85. 5 4
      msm/sde/sde_core_irq.c
  86. 21 15
      msm/sde/sde_core_perf.c
  87. 587 66
      msm/sde/sde_crtc.c
  88. 87 2
      msm/sde/sde_crtc.h
  89. 497 114
      msm/sde/sde_encoder.c
  90. 60 2
      msm/sde/sde_encoder.h
  91. 2 0
      msm/sde/sde_encoder_dce.c
  92. 12 3
      msm/sde/sde_encoder_phys.h
  93. 134 22
      msm/sde/sde_encoder_phys_cmd.c
  94. 101 17
      msm/sde/sde_encoder_phys_vid.c
  95. 303 147
      msm/sde/sde_encoder_phys_wb.c
  96. 559 42
      msm/sde/sde_fence.c
  97. 98 2
      msm/sde/sde_fence.h
  98. 227 289
      msm/sde/sde_hw_catalog.c
  99. 103 40
      msm/sde/sde_hw_catalog.h
  100. 1 19
      msm/sde/sde_hw_catalog_format.h

+ 11 - 2
Android.mk

@@ -1,3 +1,12 @@
 # Android makefile for display kernel modules
-LOCAL_PATH := $(call my-dir)
-include $(LOCAL_PATH)/msm/Android.mk
+DISPLAY_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false)
+		DISPLAY_DLKM_ENABLE := false
+	endif
+endif
+
+ifeq ($(DISPLAY_DLKM_ENABLE),  true)
+	LOCAL_PATH := $(call my-dir)
+	include $(LOCAL_PATH)/msm/Android.mk
+endif

+ 2 - 2
Makefile.am

@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-DISPLAY_ROOT=$(ROOTDIR)vendor/qcom/opensource/display-drivers
+DISPLAY_ROOT=$(ROOTDIR)display/vendor/qcom/opensource/display-drivers
 CONFIG_DRM_MSM=$(MODULE_DRM_MSM)
 KBUILD_OPTIONS := DISPLAY_ROOT=$(DISPLAY_ROOT) CONFIG_DRM_MSM=$(CONFIG_DRM_MSM)
 
 ifeq ($(TARGET_SUPPORT),genericarmv8)
-	KBUILD_OPTIONS += CONFIG_ARCH_WAIPIO=y
+	KBUILD_OPTIONS += CONFIG_ARCH_KALAMA=y
 endif
 
 obj-m += msm/

+ 18 - 0
config/augen3disp.conf

@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+export CONFIG_DRM_MSM=y
+export CONFIG_DRM_MSM_SDE=y
+export CONFIG_SYNC_FILE=y
+export CONFIG_DRM_MSM_DSI=y
+export CONFIG_DRM_MSM_DP=n
+export CONFIG_DRM_MSM_DP_MST=n
+export CONFIG_QCOM_MDSS_PLL=y
+export CONFIG_DRM_SDE_RSC=n
+export CONFIG_DRM_SDE_WB=n
+export CONFIG_DRM_MSM_REGISTER_LOGGING=n
+export CONFIG_SDE_RECOVERY_MANAGER=n
+export CONFIG_DRM_SDE_SHD=n
+export CONFIG_DRM_SDE_SHP=n
+export CONFIG_DRM_SDE_ROI_MISR=n
+export CONFIG_DRM_MSM_LEASE=n
+export CONFIG_DISPLAY_BUILD=m

+ 22 - 0
config/augen3dispconf.h

@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define CONFIG_DRM_MSM 1
+#define CONFIG_DRM_MSM_SDE 1
+#define CONFIG_SYNC_FILE 1
+#define CONFIG_DRM_MSM_DSI 1
+#define CONFIG_DRM_MSM_DP 0
+#define CONFIG_DRM_MSM_DP_MST 0
+#define CONFIG_DRM_SDE_WB 0
+#define CONFIG_DRM_SDE_RSC 0
+#define CONFIG_DRM_MSM_REGISTER_LOGGING 0
+#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
+#define CONFIG_QCOM_MDSS_PLL 1
+#define CONFIG_GKI_DISPLAY 1
+#define CONFIG_MSM_EXT_DISPLAY 1
+#define CONFIG_DRM_SDE_ROI_MISR 0
+#define CONFIG_DRM_SDE_SHD 0
+#define CONFIG_DRM_SDE_SHP 0
+#define CONFIG_DRM_MSM_LEASE 0

+ 6 - 0
config/gki_kalamadisp.conf

@@ -2,10 +2,16 @@ export CONFIG_DRM_MSM=y
 export CONFIG_DRM_MSM_SDE=y
 export CONFIG_SYNC_FILE=y
 export CONFIG_DRM_MSM_DSI=y
+export CONFIG_DRM_MSM_DP=y
+export CONFIG_DRM_MSM_DP_MST=y
 export CONFIG_DSI_PARSER=y
 export CONFIG_QCOM_MDSS_PLL=y
 export CONFIG_DRM_SDE_RSC=y
 export CONFIG_DRM_SDE_WB=y
 export CONFIG_DRM_MSM_REGISTER_LOGGING=y
+export CONFIG_MSM_MMRM=y
 export CONFIG_DISPLAY_BUILD=m
 export CONFIG_HDCP_QSEECOM=y
+export CONFIG_DRM_SDE_VM=y
+export CONFIG_QTI_HW_FENCE=y
+export CONFIG_QCOM_SPEC_SYNC=y

+ 7 - 0
config/gki_kalamadispconf.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  */
 
@@ -7,12 +8,18 @@
 #define CONFIG_DRM_MSM_SDE 1
 #define CONFIG_SYNC_FILE 1
 #define CONFIG_DRM_MSM_DSI 1
+#define CONFIG_DRM_MSM_DP 1
+#define CONFIG_DRM_MSM_DP_MST 1
 #define CONFIG_DSI_PARSER 1
 #define CONFIG_DRM_SDE_WB 1
 #define CONFIG_DRM_SDE_RSC 1
 #define CONFIG_DRM_MSM_REGISTER_LOGGING 1
+#define CONFIG_MSM_MMRM 1
 #define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
 #define CONFIG_QCOM_MDSS_PLL 1
 #define CONFIG_GKI_DISPLAY 1
 #define CONFIG_MSM_EXT_DISPLAY 1
 #define CONFIG_HDCP_QSEECOM 1
+#define CONFIG_DRM_SDE_VM 1
+#define CONFIG_QTI_HW_FENCE 1
+#define CONFIG_QCOM_SPEC_SYNC 1

+ 10 - 0
config/gki_kalamadisptui.conf

@@ -0,0 +1,10 @@
+export CONFIG_DRM_MSM=y
+export CONFIG_DRM_MSM_SDE=y
+export CONFIG_SYNC_FILE=y
+export CONFIG_DRM_MSM_DSI=y
+export CONFIG_DSI_PARSER=y
+export CONFIG_QCOM_MDSS_PLL=y
+export CONFIG_DRM_MSM_REGISTER_LOGGING=y
+export CONFIG_DISPLAY_BUILD=m
+export CONFIG_DRM_SDE_VM=y
+export CONFIG_DRM_LOW_MSM_MEM_FOOTPRINT=y

+ 17 - 0
config/gki_kalamadisptuiconf.h

@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#define CONFIG_DRM_MSM 1
+#define CONFIG_DRM_MSM_SDE 1
+#define CONFIG_SYNC_FILE 1
+#define CONFIG_DRM_MSM_DSI 1
+#define CONFIG_DSI_PARSER 1
+#define CONFIG_DRM_MSM_REGISTER_LOGGING 1
+#define CONFIG_DRM_SDE_EVTLOG_DEBUG 1
+#define CONFIG_QCOM_MDSS_PLL 1
+#define CONFIG_GKI_DISPLAY 1
+#define CONFIG_DRM_SDE_VM 1
+#define CONFIG_DRM_MSM_LOW_MEM_FOOTPRINT 1

+ 1 - 1
config/gki_parrotdispconf.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #define CONFIG_DRM_MSM 1

+ 7 - 1
display_driver_board.mk

@@ -1,6 +1,12 @@
 #SPDX-License-Identifier: GPL-2.0-only
+DISPLAY_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false)
+		DISPLAY_DLKM_ENABLE := false
+	endif
+endif
 
-ifneq ($(TARGET_BOARD_AUTO),true)
+ifeq ($(DISPLAY_DLKM_ENABLE),  true)
 	ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
 		BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_drm.ko
 		BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_drm.ko

+ 12 - 1
display_driver_product.mk

@@ -1,3 +1,14 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-PRODUCT_PACKAGES += msm_drm.ko
+DISPLAY_DLKM_ENABLE := true
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_DISPLAY_OVERRIDE), false)
+		DISPLAY_DLKM_ENABLE := false
+	endif
+endif
+
+ifeq ($(DISPLAY_DLKM_ENABLE),  true)
+	PRODUCT_PACKAGES += msm_drm.ko
+endif
+
+DISPLAY_MODULES_DRIVER := msm_drm.ko

+ 5 - 2
hdcp/msm_hdcp.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
@@ -135,8 +136,8 @@ static ssize_t tp_show(struct device *dev, struct device_attribute *attr,
 
 		ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY);
 
-		/* clear the flag once data is read back to user space*/
-		hdcp->tp_msgid = -1;
+		/* reset the flag once the data is written back to user space */
+		hdcp->tp_msgid = DOWN_REQUEST_TOPOLOGY;
 		break;
 	default:
 		ret = -EINVAL;
@@ -290,6 +291,8 @@ static int msm_hdcp_probe(struct platform_device *pdev)
 	if (ret)
 		pr_err("unable to register msm_hdcp sysfs nodes\n");
 
+	hdcp->tp_msgid = DOWN_REQUEST_TOPOLOGY;
+
 	return 0;
 error_cdev_add:
 	device_destroy(hdcp->class, hdcp->dev_num);

+ 34 - 1
include/uapi/display/drm/msm_drm_pp.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -7,6 +8,12 @@
 #define _MSM_DRM_PP_H_
 
 #include <linux/types.h>
+#include <drm/drm.h>
+
+#define ENABLE_EVENT_SPR_OPR_VALUE
+#define ENABLE_EVENT_INTF_MISR_SIGNATURE
+#define MAX_DSI_DISPLAY 4
+
 /**
  * struct drm_msm_pcc_coeff - PCC coefficient structure for each color
  *                            component.
@@ -141,6 +148,7 @@ struct drm_msm_memcol {
 #define SIXZONE_HUE_ENABLE (1 << 0)
 #define SIXZONE_SAT_ENABLE (1 << 1)
 #define SIXZONE_VAL_ENABLE (1 << 2)
+#define SIXZONE_SV_ENABLE (1 << 3)
 /* struct drm_msm_sixzone_curve - Sixzone HSV adjustment curve structure.
  * @p0: Hue adjustment.
  * @p1: Saturation/Value adjustment.
@@ -155,12 +163,16 @@ struct drm_msm_sixzone_curve {
  *         - SIXZONE_HUE_ENABLE: Enable hue adjustment
  *         - SIXZONE_SAT_ENABLE: Enable saturation adjustment
  *         - SIXZONE_VAL_ENABLE: Enable value adjustment
+ *         - SIXZONE_SV_ENABLE: Enable SV feature
  * @threshold: threshold qualifier.
  * @adjust_p0: Adjustment curve.
  * @adjust_p1: Adjustment curve.
  * @sat_hold: Saturation hold info.
  * @val_hold: Value hold info.
  * @curve: HSV adjustment curve lut.
+ * @sat_adjust_p0: Saturation adjustment curve.
+ * @sat_adjust_p1: Saturation adjustment curve.
+ * @curve_p2: Saturation Mid/Saturation High adjustment
  */
 struct drm_msm_sixzone {
 	__u64 flags;
@@ -170,6 +182,9 @@ struct drm_msm_sixzone {
 	__u32 sat_hold;
 	__u32 val_hold;
 	struct drm_msm_sixzone_curve curve[SIXZONE_LUT_SIZE];
+	__u32 sat_adjust_p0;
+	__u32 sat_adjust_p1;
+	__u32 curve_p2[SIXZONE_LUT_SIZE];
 };
 
 #define GAMUT_3D_MODE_17 1
@@ -480,7 +495,7 @@ struct drm_msm_ad4_roi_cfg {
 #define LTM_DATA_SIZE_3 33
 #define LTM_BUFFER_SIZE 5
 #define LTM_GUARD_BYTES 255
-#define LTM_BLOCK_SIZE 2
+#define LTM_BLOCK_SIZE 4
 
 #define LTM_STATS_SAT (1 << 1)
 #define LTM_STATS_MERGE_SAT (1 << 2)
@@ -572,6 +587,7 @@ struct drm_msm_ltm_buffer {
 #define SPR_INIT_PARAM_SIZE_3 16
 #define SPR_INIT_PARAM_SIZE_4 24
 #define SPR_INIT_PARAM_SIZE_5 32
+#define SPR_FLAG_BYPASS (1 << 0)
 
 /**
  * struct drm_msm_spr_init_cfg - SPR initial configuration structure
@@ -742,4 +758,21 @@ struct drm_msm_dimming_bl_lut {
 	__u32 mapped_bl[DIMMING_BL_LUT_LEN];
 };
 
+struct drm_msm_opr_value {
+	__u32 num_valid_opr;
+	__u32 opr_value[MAX_DSI_DISPLAY];
+};
+
+#define SDE_MAX_ROI 4
+struct drm_msm_roi {
+	__u32 num_rects;
+	struct drm_clip_rect roi[SDE_MAX_ROI];
+};
+
+struct drm_msm_misr_sign {
+	__u64 num_valid_misr;
+	struct drm_msm_roi roi_list;
+	__u64 misr_sign_value[MAX_DSI_DISPLAY];
+};
+
 #endif /* _MSM_DRM_PP_H_ */

+ 16 - 1
include/uapi/display/drm/sde_drm.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -261,6 +262,9 @@ struct sde_drm_de_v1 {
 /* Disable dynamic expansion */
 #define SDE_DYN_EXP_DISABLE 0x1
 
+#define SDE_DE_LPF_BLEND_FILT
+#define SDE_DE_LPF_BLEND_FLAG_EN (1 << 0)
+
 #define SDE_DRM_QSEED3LITE
 #define SDE_DRM_QSEED4
 #define SDE_DRM_INLINE_PREDOWNSCALE
@@ -301,6 +305,10 @@ struct sde_drm_de_v1 {
  * @pre_downscale_x_1  Pre-downscale ratio, x-direction, plane 1(UV)
  * @pre_downscale_y_0  Pre-downscale ratio, y-direction, plane 0(Y/RGB)
  * @pre_downscale_y_1  Pre-downscale ratio, y-direction, plane 1(UV)
+ * @de_lpf_flags:      Detail enhancer lpf blned configuration flags
+ * @de_lpf_h:          Detail enhancer lpf blend high
+ * @de_lpf_l:          Detail enhancer lpf blend low
+ * @de_lpf_m:          Detail enhancer lpf blend medium
  */
 struct sde_drm_scaler_v2 {
 	/*
@@ -366,10 +374,15 @@ struct sde_drm_scaler_v2 {
 	__u32 pre_downscale_x_1;
 	__u32 pre_downscale_y_0;
 	__u32 pre_downscale_y_1;
+
+	__u32 de_lpf_flags;
+	__u32 de_lpf_h;
+	__u32 de_lpf_l;
+	__u32 de_lpf_m;
 };
 
 /* Number of dest scalers supported */
-#define SDE_MAX_DS_COUNT 2
+#define SDE_MAX_DS_COUNT 4
 
 /*
  * Destination scaler flag config
@@ -904,6 +917,8 @@ struct sde_drm_dnsc_blur_cfg {
 #define DRM_EVENT_FRAME_DATA 0x8000000C
 #define DRM_EVENT_DIMMING_BL 0X8000000D
 #define DRM_EVENT_VM_RELEASE 0X8000000E
+#define DRM_EVENT_OPR_VALUE 0X8000000F
+#define DRM_EVENT_MISR_SIGN 0X80000010
 
 #ifndef DRM_MODE_FLAG_VID_MODE_PANEL
 #define DRM_MODE_FLAG_VID_MODE_PANEL        0x01

+ 2 - 0
include/uapi/display/media/mmm_color_fmt.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef __MMM_COLOR_FMT_INFO_H__
 #define __MMM_COLOR_FMT_INFO_H__
@@ -887,6 +888,7 @@ static inline unsigned int MMM_COLOR_FMT_Y_STRIDE(unsigned int color_fmt,
 	case MMM_COLOR_FMT_P010_512:
 		alignment = 512;
 		stride = MMM_COLOR_FMT_ALIGN(width * 2, alignment);
+		break;
 	default:
 		break;
 	}

+ 23 - 3
msm/Android.mk

@@ -22,7 +22,15 @@ KBUILD_OPTIONS += MODNAME=msm_drm
 KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
 KBUILD_OPTIONS += $(DISPLAY_SELECT)
 
-KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS=$(PWD)/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+ifneq ($(TARGET_BOARD_AUTO),true)
+KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+ifneq ($(TARGET_BOARD_PLATFORM), taro)
+	KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,msm-ext-disp-module-symvers)/Module.symvers
+	KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,sec-module-symvers)/Module.symvers
+	KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
+	KBUILD_OPTIONS += KBUILD_EXTRA_SYMBOLS+=$(PWD)/$(call intermediates-dir-for,DLKM,sync-fence-module-symvers)/Module.symvers
+endif
+endif
 
 ###########################################################
 include $(CLEAR_VARS)
@@ -33,8 +41,20 @@ LOCAL_MODULE_TAGS         := optional
 LOCAL_MODULE_DEBUG_ENABLE := true
 LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
 
-LOCAL_REQUIRED_MODULES    := mmrm-module-symvers
-LOCAL_ADDITIONAL_DEPENDENCIES := $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+ifneq ($(TARGET_BOARD_AUTO),true)
+LOCAL_REQUIRED_MODULES    += mmrm-module-symvers
+LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,mmrm-module-symvers)/Module.symvers
+ifneq ($(TARGET_BOARD_PLATFORM), taro)
+	LOCAL_REQUIRED_MODULES    += msm-ext-disp-module-symvers
+	LOCAL_REQUIRED_MODULES    += sec-module-symvers
+	LOCAL_REQUIRED_MODULES    += hw-fence-module-symvers
+	LOCAL_REQUIRED_MODULES    += sync-fence-module-symvers
+	LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,msm-ext-disp-module-symvers)/Module.symvers
+	LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,sec-module-symvers)/Module.symvers
+	LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,hw-fence-module-symvers)/Module.symvers
+	LOCAL_ADDITIONAL_DEPENDENCIES += $(call intermediates-dir-for,DLKM,sync-fence-module-symvers)/Module.symvers
+endif
+endif
 
 include $(DLKM_DIR)/Build_external_kernelmodule.mk
 ###########################################################

+ 20 - 6
msm/Kbuild

@@ -22,10 +22,20 @@ ifeq ($(CONFIG_ARCH_PARROT), y)
 	LINUX_INC +=	-include $(DISPLAY_ROOT)/config/gki_parrotdispconf.h
 endif
 
-#ifeq ($(CONFIG_ARCH_KALAMA), y)
+ifeq ($(CONFIG_ARCH_KALAMA), y)
+ifeq ($(CONFIG_ARCH_QTI_VM), y)
+        include $(DISPLAY_ROOT)/config/gki_kalamadisptui.conf
+        LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_kalamadisptuiconf.h
+else
 	include $(DISPLAY_ROOT)/config/gki_kalamadisp.conf
 	LINUX_INC += -include $(DISPLAY_ROOT)/config/gki_kalamadispconf.h
-#endif
+endif
+endif
+
+ifeq (y, $(findstring y, $(CONFIG_ARCH_SA8155) $(CONFIG_ARCH_SA6155) $(CONFIG_ARCH_SA8195)))
+	include $(DISPLAY_ROOT)/config/augen3disp.conf
+	LINUX_INC += -include $(DISPLAY_ROOT)/config/augen3dispconf.h
+endif
 
 LINUX_INC +=	-Iinclude/linux \
 		-Iinclude/linux/drm
@@ -91,17 +101,19 @@ msm_drm-$(CONFIG_MSM_SDE_ROTATOR) += ../rotator/sde_rotator_dev.o \
 				     ../rotator/sde_rotator_r1.o \
 				     ../rotator/sde_rotator_r3.o
 
+ifeq ($(CONFIG_MSM_SDE_ROTATOR), y)
 msm_drm-$(CONFIG_SYNC_FILE) += ../rotator/sde_rotator_sync.o
 
+msm_drm-$(CONFIG_DEBUG_FS) += ../rotator/sde_rotator_debug.o \
+			      ../rotator/sde_rotator_r1_debug.o \
+			      ../rotator/sde_rotator_r3_debug.o
+endif
+
 msm_drm-$(CONFIG_DRM_SDE_VM) += sde/sde_vm_common.o \
 				sde/sde_vm_primary.o \
 				sde/sde_vm_trusted.o \
 				sde/sde_vm_msgq.o
 
-msm_drm-$(CONFIG_DEBUG_FS) += ../rotator/sde_rotator_debug.o \
-			      ../rotator/sde_rotator_r1_debug.o \
-			      ../rotator/sde_rotator_r3_debug.o
-
 msm_drm-$(CONFIG_DRM_MSM_DP) += dp/dp_altmode.o \
 				dp/dp_parser.o \
 				dp/dp_power.o \
@@ -198,11 +210,13 @@ msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi_phy.o \
 				 dsi/dsi_phy.o \
 				 dsi/dsi_phy_hw_v3_0.o \
 				 dsi/dsi_phy_hw_v4_0.o \
+				 dsi/dsi_phy_hw_v5_0.o \
 				 dsi/dsi_phy_timing_calc.o \
 				 dsi/dsi_phy_timing_v3_0.o \
 				 dsi/dsi_phy_timing_v4_0.o \
 				 dsi/dsi_pll.o \
 				 dsi/dsi_pll_5nm.o \
+				 dsi/dsi_pll_4nm.o \
 				 dsi/dsi_ctrl_hw_cmn.o \
 				 dsi/dsi_ctrl_hw_2_2.o \
 				 dsi/dsi_ctrl.o \

+ 76 - 31
msm/dp/dp_aux.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -11,6 +12,31 @@
 #include "dp_debug.h"
 
 #define DP_AUX_ENUM_STR(x)		#x
+#define DP_AUX_IPC_NUM_PAGES 10
+
+#define DP_AUX_DEBUG(dp_aux, fmt, ...) \
+	do { \
+		if (dp_aux) \
+			ipc_log_string(dp_aux->ipc_log_context, "[d][%-4d]"fmt,\
+					current->pid, ##__VA_ARGS__); \
+		DP_DEBUG_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_AUX_WARN(dp_aux, fmt, ...) \
+	do { \
+		if (dp_aux) \
+			ipc_log_string(dp_aux->ipc_log_context, "[w][%-4d]"fmt,\
+					current->pid, ##__VA_ARGS__); \
+		DP_WARN_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_AUX_ERR(dp_aux, fmt, ...) \
+	do { \
+		if (dp_aux) \
+			ipc_log_string(dp_aux->ipc_log_context, "[e][%-4d]"fmt,\
+					current->pid, ##__VA_ARGS__); \
+		DP_ERR_V(fmt, ##__VA_ARGS__); \
+	} while (0)
 
 enum {
 	DP_AUX_DATA_INDEX_WRITE = BIT(31),
@@ -46,7 +72,7 @@ struct dp_aux_private {
 	atomic_t aborted;
 };
 
-#ifdef CONFIG_DYNAMIC_DEBUG
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
 static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
 		struct drm_dp_aux_msg *msg)
 {
@@ -56,10 +82,11 @@ static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
 	u8 linebuf[64];
 	struct dp_aux_private *aux = container_of(drm_aux,
 		struct dp_aux_private, drm_aux);
+	struct dp_aux *dp_aux = &aux->dp_aux;
 
 	snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ",
-		aux->native ? "NAT" : "I2C",
-		aux->read ? "RD" : "WR",
+		(msg->request & DP_AUX_I2C_MOT) ? "I2C" : "NAT",
+		(msg->request & DP_AUX_I2C_READ) ? "RD" : "WR",
 		msg->address, msg->size);
 
 	for (i = 0; i < msg->size; i += rowsize) {
@@ -69,7 +96,10 @@ static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
 		hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1,
 			linebuf, sizeof(linebuf), false);
 
-		DP_DEBUG("%s%s\n", prefix, linebuf);
+		if (msg->size == 1 && msg->address == 0)
+			DP_DEBUG_V("%s%s\n", prefix, linebuf);
+		else
+			DP_AUX_DEBUG(dp_aux, "%s%s\n", prefix, linebuf);
 	}
 }
 #else
@@ -77,7 +107,7 @@ static void dp_aux_hex_dump(struct drm_dp_aux *drm_aux,
 		struct drm_dp_aux_msg *msg)
 {
 }
-#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
 
 static char *dp_aux_get_error(u32 aux_error)
 {
@@ -106,6 +136,7 @@ static u32 dp_aux_write(struct dp_aux_private *aux,
 	u8 *msgdata = msg->buffer;
 	int const aux_cmd_fifo_len = 128;
 	int i = 0;
+	struct dp_aux *dp_aux = &aux->dp_aux;
 
 	if (aux->read)
 		len = 4;
@@ -117,7 +148,7 @@ static u32 dp_aux_write(struct dp_aux_private *aux,
 	 * limit buf length to 128 bytes here
 	 */
 	if (len > aux_cmd_fifo_len) {
-		DP_ERR("buf len error\n");
+		DP_AUX_ERR(dp_aux, "buf len error\n");
 		return 0;
 	}
 
@@ -165,18 +196,19 @@ static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
 {
 	u32 ret = 0, len = 0, timeout;
 	int const aux_timeout_ms = HZ/4;
+	struct dp_aux *dp_aux = &aux->dp_aux;
 
 	reinit_completion(&aux->comp);
 
 	len = dp_aux_write(aux, msg);
 	if (len == 0) {
-		DP_ERR("DP AUX write failed\n");
+		DP_AUX_ERR(dp_aux, "DP AUX write failed\n");
 		return -EINVAL;
 	}
 
 	timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
 	if (!timeout) {
-		DP_ERR("aux %s timeout\n", (aux->read ? "read" : "write"));
+		DP_AUX_ERR(dp_aux, "aux %s timeout\n", (aux->read ? "read" : "write"));
 		return -ETIMEDOUT;
 	}
 
@@ -199,6 +231,7 @@ static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
 	u8 *dp;
 	u32 i, actual_i;
 	u32 len = msg->size;
+	struct dp_aux *dp_aux = &aux->dp_aux;
 
 	aux->catalog->clear_trans(aux->catalog, true);
 
@@ -220,7 +253,7 @@ static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
 
 		actual_i = (data >> 16) & 0xFF;
 		if (i != actual_i)
-			DP_WARN("Index mismatch: expected %d, found %d\n",
+			DP_AUX_WARN(dp_aux, "Index mismatch: expected %d, found %d\n",
 				i, actual_i);
 	}
 }
@@ -279,7 +312,7 @@ static void dp_aux_isr(struct dp_aux *dp_aux)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -301,7 +334,7 @@ static void dp_aux_reconfig(struct dp_aux *dp_aux)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -317,7 +350,7 @@ static void dp_aux_abort_transaction(struct dp_aux *dp_aux, bool abort)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -426,6 +459,7 @@ static int dp_aux_transfer_ready(struct dp_aux_private *aux,
 	int ret = 0;
 	int const aux_cmd_native_max = 16;
 	int const aux_cmd_i2c_max = 128;
+	struct dp_aux *dp_aux = &aux->dp_aux;
 
 	if (atomic_read(&aux->aborted)) {
 		ret = -ETIMEDOUT;
@@ -444,7 +478,7 @@ static int dp_aux_transfer_ready(struct dp_aux_private *aux,
 	/* msg sanity check */
 	if ((aux->native && (msg->size > aux_cmd_native_max)) ||
 		(msg->size > aux_cmd_i2c_max)) {
-		DP_ERR("%s: invalid msg: size(%zu), request(%x)\n",
+		DP_AUX_ERR(dp_aux, "%s: invalid msg: size(%zu), request(%x)\n",
 			__func__, msg->size, msg->request);
 		ret = -EINVAL;
 		goto error;
@@ -549,6 +583,7 @@ static ssize_t dp_aux_bridge_transfer(struct drm_dp_aux *drm_aux,
 		size = aux->aux_bridge->transfer(aux->aux_bridge,
 				drm_aux, msg);
 		aux->bridge_in_transfer = false;
+		dp_aux_hex_dump(drm_aux, msg);
 	}
 
 	return size;
@@ -580,6 +615,7 @@ static ssize_t dp_aux_transfer_debug(struct drm_dp_aux *drm_aux,
 		size = aux->sim_bridge->transfer(aux->sim_bridge,
 				drm_aux, msg);
 		aux->sim_in_transfer = false;
+		dp_aux_hex_dump(drm_aux, msg);
 	}
 end:
 	return size;
@@ -598,7 +634,7 @@ static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux || !aux_cfg) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -607,6 +643,10 @@ static void dp_aux_init(struct dp_aux *dp_aux, struct dp_aux_cfg *aux_cfg)
 	if (aux->enabled)
 		return;
 
+	dp_aux->ipc_log_context = ipc_log_context_create(DP_AUX_IPC_NUM_PAGES, "drm_dp_aux", 0);
+	if (!dp_aux->ipc_log_context)
+		DP_AUX_WARN(dp_aux, "Error in creating dp_aux_ipc_log context\n");
+
 	dp_aux_reset_phy_config_indices(aux_cfg);
 	aux->catalog->setup(aux->catalog, aux_cfg);
 	aux->catalog->reset(aux->catalog);
@@ -621,7 +661,7 @@ static void dp_aux_deinit(struct dp_aux *dp_aux)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -630,18 +670,23 @@ static void dp_aux_deinit(struct dp_aux *dp_aux)
 	if (!aux->enabled)
 		return;
 
+	if (dp_aux->ipc_log_context) {
+		ipc_log_context_destroy(dp_aux->ipc_log_context);
+		dp_aux->ipc_log_context = NULL;
+	}
+
 	atomic_set(&aux->aborted, 1);
 	aux->catalog->enable(aux->catalog, false);
 	aux->enabled = false;
 }
 
-static int dp_aux_register(struct dp_aux *dp_aux)
+static int dp_aux_register(struct dp_aux *dp_aux, struct drm_device *drm_dev)
 {
 	struct dp_aux_private *aux;
 	int ret = 0;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		ret = -EINVAL;
 		goto exit;
 	}
@@ -651,10 +696,13 @@ static int dp_aux_register(struct dp_aux *dp_aux)
 	aux->drm_aux.name = "sde_dp_aux";
 	aux->drm_aux.dev = aux->dev;
 	aux->drm_aux.transfer = dp_aux_transfer;
+#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE)
+	aux->drm_aux.drm_dev = drm_dev;
+#endif
 	atomic_set(&aux->aborted, 1);
 	ret = drm_dp_aux_register(&aux->drm_aux);
 	if (ret) {
-		DP_ERR("%s: failed to register drm aux: %d\n", __func__, ret);
+		DP_AUX_ERR(dp_aux, "%s: failed to register drm aux: %d\n", __func__, ret);
 		goto exit;
 	}
 	dp_aux->drm_aux = &aux->drm_aux;
@@ -671,7 +719,7 @@ static void dp_aux_deregister(struct dp_aux *dp_aux)
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -685,7 +733,7 @@ static void dp_aux_set_sim_mode(struct dp_aux *dp_aux,
 	struct dp_aux_private *aux;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		return;
 	}
 
@@ -715,7 +763,7 @@ static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux,
 	enum fsa_function event = FSA_USBC_DISPLAYPORT_DISCONNECTED;
 
 	if (!dp_aux) {
-		DP_ERR("invalid input\n");
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -723,7 +771,7 @@ static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux,
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
 	if (!aux->aux_switch_node) {
-		DP_DEBUG("undefined fsa4480 handle\n");
+		DP_AUX_DEBUG(dp_aux, "undefined fsa4480 handle\n");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -737,18 +785,18 @@ static int dp_aux_configure_aux_switch(struct dp_aux *dp_aux,
 			event = FSA_USBC_ORIENTATION_CC2;
 			break;
 		default:
-			DP_ERR("invalid orientation\n");
+			DP_AUX_ERR(dp_aux, "invalid orientation\n");
 			rc = -EINVAL;
 			goto end;
 		}
 	}
 
-	DP_DEBUG("enable=%d, orientation=%d, event=%d\n",
+	DP_AUX_DEBUG(dp_aux, "enable=%d, orientation=%d, event=%d\n",
 			enable, orientation, event);
 
 	rc = fsa4480_switch_event(aux->aux_switch_node, event);
 	if (rc)
-		DP_ERR("failed to configure fsa4480 i2c device (%d)\n", rc);
+		DP_AUX_ERR(dp_aux, "failed to configure fsa4480 i2c device (%d)\n", rc);
 end:
 	return rc;
 }
@@ -759,13 +807,10 @@ struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog,
 {
 	int rc = 0;
 	struct dp_aux_private *aux;
-	struct dp_aux *dp_aux;
+	struct dp_aux *dp_aux = NULL;
 
-	if (!catalog || !parser ||
-			(!parser->no_aux_switch &&
-				!aux_switch &&
-				!parser->gpio_aux_switch)) {
-		DP_ERR("invalid input\n");
+	if (!catalog || !parser) {
+		DP_AUX_ERR(dp_aux, "invalid input\n");
 		rc = -ENODEV;
 		goto error;
 	}

+ 3 - 1
msm/dp/dp_aux.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -41,9 +42,10 @@ struct dp_aux {
 	bool read;
 
 	struct mutex *access_lock;
+	void *ipc_log_context;
 
 	struct drm_dp_aux *drm_aux;
-	int (*drm_aux_register)(struct dp_aux *aux);
+	int (*drm_aux_register)(struct dp_aux *aux, struct drm_device *drm_dev);
 	void (*drm_aux_deregister)(struct dp_aux *aux);
 	void (*isr)(struct dp_aux *aux);
 	void (*init)(struct dp_aux *aux, struct dp_aux_cfg *aux_cfg);

+ 3 - 2
msm/dp/dp_aux_bridge.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -49,7 +50,7 @@ int dp_aux_add_bridge(struct dp_aux_bridge *bridge)
 	return 0;
 }
 
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF)
 struct dp_aux_bridge *of_dp_aux_find_bridge(struct device_node *np)
 {
 	struct dp_aux_bridge *bridge;
@@ -66,5 +67,5 @@ struct dp_aux_bridge *of_dp_aux_find_bridge(struct device_node *np)
 	mutex_unlock(&dp_aux_bridge_lock);
 	return NULL;
 }
-#endif
+#endif /* CONFIG_OF */
 

+ 5 - 4
msm/dp/dp_aux_bridge.h

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -59,9 +60,9 @@ enum dp_aux_bridge_flag {
  * @head: to keep track of all added bridges
  */
 struct dp_aux_bridge {
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF)
 	struct device_node *of_node;
-#endif
+#endif /* CONFIG_OF */
 	void *dev_priv;
 	u32 flag;
 	void *mst_ctx;
@@ -116,14 +117,14 @@ struct dp_aux_bridge {
  */
 int dp_aux_add_bridge(struct dp_aux_bridge *bridge);
 
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF)
 /**
  * of_dp_aux_find_bridge - Find registered DP aux bridge
  * @np: device node pointer to the bridge
  * return: DP aux bridge pointer, NULL if not found
  */
 struct dp_aux_bridge *of_dp_aux_find_bridge(struct device_node *np);
-#endif
+#endif /* CONFIG_OF */
 
 #endif /* _DP_AUX_BRIDGE_H_ */
 

+ 14 - 15
msm/dp/dp_catalog.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -43,6 +44,8 @@
 	(DP_INTR_MST_DP0_VCPF_SENT | DP_INTR_MST_DP1_VCPF_SENT)
 
 #define DP_INTR_MASK5		(DP_INTERRUPT_STATUS5 << 2)
+#define DP_TPG_PATTERN_MAX	9
+#define DP_TPG_PATTERN_DEFAULT	8
 
 #define dp_catalog_fill_io(x) { \
 	catalog->io.x = parser->get_io(parser, #x); \
@@ -1416,8 +1419,7 @@ static void dp_catalog_ctrl_usb_reset(struct dp_catalog_ctrl *ctrl, bool flip)
 	wmb();
 }
 
-static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
-	bool enable)
+static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel, u32 pattern)
 {
 	struct dp_catalog_private *catalog;
 	struct dp_io_data *io_data;
@@ -1440,7 +1442,7 @@ static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
 	else if (panel->stream_id == DP_STREAM_1)
 		io_data = catalog->io.dp_p1;
 
-	if (!enable) {
+	if (!pattern) {
 		dp_write(MMSS_DP_TPG_MAIN_CONTROL, 0x0);
 		dp_write(MMSS_DP_BIST_ENABLE, 0x0);
 		reg = dp_read(MMSS_DP_TIMING_ENGINE_EN);
@@ -1450,6 +1452,9 @@ static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
 		return;
 	}
 
+	if (pattern > DP_TPG_PATTERN_MAX)
+		pattern = DP_TPG_PATTERN_DEFAULT;
+
 	dp_write(MMSS_DP_INTF_HSYNC_CTL,
 			panel->hsync_ctl);
 	dp_write(MMSS_DP_INTF_VSYNC_PERIOD_F0,
@@ -1471,7 +1476,7 @@ static void dp_catalog_panel_tpg_cfg(struct dp_catalog_panel *panel,
 	dp_write(MMSS_DP_INTF_POLARITY_CTL, 0);
 	wmb(); /* ensure TPG registers are programmed */
 
-	dp_write(MMSS_DP_TPG_MAIN_CONTROL, 0x100);
+	dp_write(MMSS_DP_TPG_MAIN_CONTROL, (1 << pattern));
 	dp_write(MMSS_DP_TPG_VIDEO_CONFIG, 0x5);
 	wmb(); /* ensure TPG config is programmed */
 	dp_write(MMSS_DP_BIST_ENABLE, 0x1);
@@ -2812,18 +2817,12 @@ static int dp_catalog_init(struct device *dev, struct dp_catalog *dp_catalog,
 	struct dp_catalog_private *catalog = container_of(dp_catalog,
 				struct dp_catalog_private, dp_catalog);
 
-	switch (parser->hw_cfg.phy_version) {
-	case DP_PHY_VERSION_4_2_0:
-		dp_catalog->sub = dp_catalog_get_v420(dev, dp_catalog,
-					&catalog->io);
-		break;
-	case DP_PHY_VERSION_2_0_0:
-		dp_catalog->sub = dp_catalog_get_v200(dev, dp_catalog,
-					&catalog->io);
-		break;
-	default:
+	if (parser->hw_cfg.phy_version >= DP_PHY_VERSION_4_2_0)
+		dp_catalog->sub = dp_catalog_get_v420(dev, dp_catalog, &catalog->io);
+	else if (parser->hw_cfg.phy_version == DP_PHY_VERSION_2_0_0)
+		dp_catalog->sub = dp_catalog_get_v200(dev, dp_catalog, &catalog->io);
+	else
 		goto end;
-	}
 
 	if (IS_ERR(dp_catalog->sub)) {
 		rc = PTR_ERR(dp_catalog->sub);

+ 2 - 1
msm/dp/dp_catalog.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -208,7 +209,7 @@ struct dp_catalog_panel {
 	void (*config_sdp)(struct dp_catalog_panel *panel, bool en);
 	int (*set_colorspace)(struct dp_catalog_panel *panel,
 		 bool vsc_supported);
-	void (*tpg_config)(struct dp_catalog_panel *panel, bool enable);
+	void (*tpg_config)(struct dp_catalog_panel *panel, u32  pattern);
 	void (*config_spd)(struct dp_catalog_panel *panel);
 	void (*config_misc)(struct dp_catalog_panel *panel);
 	void (*config_msa)(struct dp_catalog_panel *panel,

+ 39 - 54
msm/dp/dp_catalog_v420.c

@@ -1,21 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 
 #include "dp_catalog.h"
 #include "dp_reg.h"
 #include "dp_debug.h"
-
-#define MMSS_DP_PIXEL_BASE_V130			(0x1A8)
-#define MMSS_DP_PIXEL1_BASE_V130		(0x1C0)
-
-#define MMSS_DP_PIXEL_BASE_V140			(0x1BC)
-#define MMSS_DP_PIXEL1_BASE_V140		(0x1D4)
-
-#define MMSS_DP_M_OFF				(0x8)
-#define MMSS_DP_N_OFF				(0xC)
+#include "dp_pll.h"
+#include <linux/rational.h>
+#include <drm/drm_fixed.h>
 
 #define dp_catalog_get_priv_v420(x) ({ \
 	struct dp_catalog *catalog; \
@@ -78,6 +73,13 @@ static u8 const dp_swing_hbr_rbr[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
 	{0x1F, 0xFF, 0xFF, 0xFF}  /* sw1, 1.2v */
 };
 
+static const u8 dp_pre_emp_hbr_rbr_v600[MAX_VOLTAGE_LEVELS][MAX_PRE_EMP_LEVELS] = {
+	{0x00, 0x0D, 0x14, 0x1A}, /* pe0, 0 db */
+	{0x00, 0x0E, 0x15, 0xFF}, /* pe1, 3.5 db */
+	{0x00, 0x0E, 0xFF, 0xFF}, /* pe2, 6.0 db */
+	{0x02, 0xFF, 0xFF, 0xFF}  /* pe3, 9.5 db */
+};
+
 struct dp_catalog_private_v420 {
 	struct device *dev;
 	struct dp_catalog_sub sub;
@@ -107,7 +109,7 @@ static void dp_catalog_aux_setup_v420(struct dp_catalog_aux *aux,
 	if (phy_version >= 0x60000000) {
 		/* Turn on BIAS current for PHY/PLL */
 		io_data = catalog->io->dp_pll;
-		dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x1D);
+		dp_write(QSERDES_COM_BIAS_EN_CLKBUFLR_EN_V600, 0x17);
 		wmb(); /* make sure BIAS programming happened */
 	} else {
 		/* Turn on BIAS current for PHY/PLL */
@@ -161,15 +163,14 @@ static void dp_catalog_aux_clear_hw_int_v420(struct dp_catalog_aux *aux)
 static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
 					u32 rate, u32 stream_rate_khz)
 {
-	u32 pixel_m, pixel_n;
-	u32 mvid, nvid, reg_off = 0, mvid_off = 0, nvid_off = 0;
+	u32 mvid, nvid, mvid_off = 0, nvid_off = 0;
 	u32 const nvid_fixed = 0x8000;
-	u32 const link_rate_hbr2 = 540000;
-	u32 const link_rate_hbr3 = 810000;
 	struct dp_catalog *dp_catalog;
 	struct dp_catalog_private_v420 *catalog;
 	struct dp_io_data *io_data;
-	u32 version;
+	unsigned long num, den;
+	u32 const input_scale = 10;
+	u64 f1, f2;
 
 	if (!panel || !rate) {
 		DP_ERR("invalid input\n");
@@ -184,48 +185,27 @@ static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
 	dp_catalog = container_of(panel, struct dp_catalog, panel);
 	catalog = container_of(dp_catalog->sub, struct dp_catalog_private_v420, sub);
 
-	version = dp_catalog_get_dp_core_version(dp_catalog);
-	io_data = catalog->io->dp_mmss_cc;
-
-	if (version >= 0x10040000) {
-		if (panel->stream_id == DP_STREAM_1)
-			reg_off = MMSS_DP_PIXEL1_BASE_V140;
-		else
-			reg_off = MMSS_DP_PIXEL_BASE_V140;
-	} else {
-		if (panel->stream_id == DP_STREAM_1)
-			reg_off = MMSS_DP_PIXEL1_BASE_V130;
-		else
-			reg_off = MMSS_DP_PIXEL_BASE_V130;
-	}
-
-
-	pixel_m = dp_read(reg_off + MMSS_DP_M_OFF);
-	pixel_n = dp_read(reg_off + MMSS_DP_N_OFF);
-	DP_DEBUG("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
+	/*
+	 * MND calculator requires the target clock to be less than half the input clock. To meet
+	 * this requirement, the input clock is scaled here and then the resulting M value is
+	 * scaled by the same factor to offset the pre-scale.
+	 */
+	rational_best_approximation(rate * input_scale, stream_rate_khz,
+			(unsigned long)(1 << 16) - 1,
+			(unsigned long)(1 << 16) - 1, &den, &num);
 
-	mvid = (pixel_m & 0xFFFF) * 5;
-	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+	mvid = (num & 0xFFFF);
+	nvid = (den & 0xFFFF);
+	mvid *= input_scale;
 
 	if (nvid < nvid_fixed) {
-		u32 temp;
-
-		temp = (nvid_fixed / nvid) * nvid;
-		mvid = (nvid_fixed / nvid) * mvid;
-		nvid = temp;
+		f1 = drm_fixp_from_fraction(nvid_fixed, nvid);
+		f2 = drm_fixp_from_fraction(mvid, 1);
+		f1 = drm_fixp_mul(f1, f2);
+		mvid = drm_fixp2int(f1);
+		nvid = nvid_fixed;
 	}
 
-	DP_DEBUG("rate = %d\n", rate);
-
-	if (panel->widebus_en)
-		mvid <<= 1;
-
-	if (link_rate_hbr2 == rate)
-		nvid *= 2;
-
-	if (link_rate_hbr3 == rate)
-		nvid *= 3;
-
 	io_data = catalog->io->dp_link;
 
 	if (panel->stream_id == DP_STREAM_1) {
@@ -233,7 +213,7 @@ static void dp_catalog_panel_config_msa_v420(struct dp_catalog_panel *panel,
 		nvid_off = DP1_SOFTWARE_NVID - DP_SOFTWARE_NVID;
 	}
 
-	DP_DEBUG("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+	DP_DEBUG("pclk=%ld, lclk=%ld, mvid=0x%x, nvid=0x%x\n", stream_rate_khz, rate, mvid, nvid);
 	dp_write(DP_SOFTWARE_MVID + mvid_off, mvid);
 	dp_write(DP_SOFTWARE_NVID + nvid_off, nvid);
 }
@@ -268,6 +248,7 @@ static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl,
 	struct dp_io_data *io_data;
 	u8 value0, value1;
 	u32 version;
+	u32 phy_version;
 
 	if (!ctrl || !((v_level < MAX_VOLTAGE_LEVELS)
 		&& (p_level < MAX_PRE_EMP_LEVELS))) {
@@ -278,6 +259,7 @@ static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl,
 	DP_DEBUG("hw: v=%d p=%d, high=%d\n", v_level, p_level, high);
 
 	catalog = dp_catalog_get_priv_v420(ctrl);
+	phy_version = dp_catalog_get_dp_phy_version(catalog->dpc);
 
 	io_data = catalog->io->dp_ahb;
 	version = dp_read(DP_HW_VERSION);
@@ -292,7 +274,10 @@ static void dp_catalog_ctrl_update_vx_px_v420(struct dp_catalog_ctrl *ctrl,
 			value1 = dp_pre_emp_hbr2_hbr3[v_level][p_level];
 		} else {
 			value0 = dp_swing_hbr_rbr[v_level][p_level];
-			value1 = dp_pre_emp_hbr_rbr[v_level][p_level];
+			if (phy_version >= 0x60000000)
+				value1 = dp_pre_emp_hbr_rbr_v600[v_level][p_level];
+			else
+				value1 = dp_pre_emp_hbr_rbr[v_level][p_level];
 		}
 	} else {
 		value0 = vm_voltage_swing[v_level][p_level];

+ 14 - 4
msm/dp/dp_ctrl.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -1236,15 +1237,24 @@ static void dp_ctrl_mst_stream_setup(struct dp_ctrl_private *ctrl,
 			lanes, bw_code, x_int, y_frac_enum);
 }
 
-static void dp_ctrl_dsc_setup(struct dp_ctrl_private *ctrl)
+static void dp_ctrl_dsc_setup(struct dp_ctrl_private *ctrl, struct dp_panel *panel)
 {
 	int rlen;
 	u32 dsc_enable;
+	struct dp_panel_info *pinfo = &panel->pinfo;
 
 	if (!ctrl->fec_mode)
 		return;
 
-	dsc_enable = ctrl->dsc_mode ? 1 : 0;
+	/* Set DP_DSC_ENABLE DPCD register if compression is enabled for SST monitor.
+	 * Set DP_DSC_ENABLE DPCD register if compression is enabled for
+	 * at least one of the MST monitors.
+	 */
+	dsc_enable = (pinfo->comp_info.enabled == true) ? 1 : 0;
+
+	if (ctrl->mst_mode && (panel->stream_id == DP_STREAM_1) && !dsc_enable)
+		return;
+
 	rlen = drm_dp_dpcd_writeb(ctrl->aux->drm_aux, DP_DSC_ENABLE,
 			dsc_enable);
 	if (rlen < 1)
@@ -1297,7 +1307,7 @@ static int dp_ctrl_stream_on(struct dp_ctrl *dp_ctrl, struct dp_panel *panel)
 
 	/* wait for link training completion before fec config as per spec */
 	dp_ctrl_fec_setup(ctrl);
-	dp_ctrl_dsc_setup(ctrl);
+	dp_ctrl_dsc_setup(ctrl, panel);
 
 	return rc;
 }
@@ -1434,7 +1444,7 @@ static void dp_ctrl_off(struct dp_ctrl *dp_ctrl)
 
 	ctrl->catalog->fec_config(ctrl->catalog, false);
 	dp_ctrl_configure_source_link_params(ctrl, false);
-	ctrl->catalog->reset(ctrl->catalog);
+	dp_ctrl_state_ctrl(ctrl, 0);
 
 	/* Make sure DP is disabled before clk disable */
 	wmb();

+ 22 - 87
msm/dp/dp_debug.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -543,14 +544,14 @@ static ssize_t dp_debug_write_mst_con_id(struct file *file,
 
 	debug->mst_con_id = con_id;
 
+	if (status == connector_status_unknown)
+		goto out;
+
 	if (status == connector_status_connected)
 		DP_INFO("plug mst connector %d\n", con_id);
-	else
+	else if (status == connector_status_disconnected)
 		DP_INFO("unplug mst connector %d\n", con_id);
 
-	if (status == connector_status_unknown)
-		goto out;
-
 	mst_port = sde_conn->mst_port;
 	dp_panel = sde_conn->drv_panel;
 
@@ -892,7 +893,7 @@ static ssize_t dp_debug_tpg_write(struct file *file,
 	struct dp_debug_private *debug = file->private_data;
 	char buf[SZ_8];
 	size_t len = 0;
-	u32 tpg_state = 0;
+	u32 tpg_pattern = 0;
 
 	if (!debug)
 		return -ENODEV;
@@ -907,19 +908,18 @@ static ssize_t dp_debug_tpg_write(struct file *file,
 
 	buf[len] = '\0';
 
-	if (kstrtoint(buf, 10, &tpg_state) != 0)
+	if (kstrtoint(buf, 10, &tpg_pattern) != 0)
 		goto bail;
 
-	tpg_state &= 0x1;
-	DP_DEBUG("tpg_state: %d\n", tpg_state);
+	DP_DEBUG("tpg_pattern: %d\n", tpg_pattern);
 
-	if (tpg_state == debug->dp_debug.tpg_state)
+	if (tpg_pattern == debug->dp_debug.tpg_pattern)
 		goto bail;
 
 	if (debug->panel)
-		debug->panel->tpg_config(debug->panel, tpg_state);
+		debug->panel->tpg_config(debug->panel, tpg_pattern);
 
-	debug->dp_debug.tpg_state = tpg_state;
+	debug->dp_debug.tpg_pattern = tpg_pattern;
 bail:
 	return len;
 }
@@ -1385,7 +1385,7 @@ static ssize_t dp_debug_tpg_read(struct file *file,
 	if (*ppos)
 		return 0;
 
-	len += snprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_state);
+	len += scnprintf(buf, SZ_8, "%d\n", debug->dp_debug.tpg_pattern);
 
 	len = min_t(size_t, count, len);
 	if (copy_to_user(user_buff, buf, len))
@@ -2054,25 +2054,10 @@ static int dp_debug_init_hdcp(struct dp_debug_private *debug,
 		struct dentry *dir)
 {
 	int rc = 0;
-	struct dentry *file;
 
-	file = debugfs_create_bool("hdcp_wait_sink_sync", 0644, dir,
-			&debug->dp_debug.hdcp_wait_sink_sync);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs hdcp_wait_sink_sync failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("hdcp_wait_sink_sync", 0644, dir, &debug->dp_debug.hdcp_wait_sink_sync);
 
-	file = debugfs_create_bool("force_encryption", 0644, dir,
-			&debug->dp_debug.force_encryption);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs force_encryption failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("force_encryption", 0644, dir, &debug->dp_debug.force_encryption);
 
 	return rc;
 }
@@ -2203,23 +2188,9 @@ static int dp_debug_init_sim(struct dp_debug_private *debug, struct dentry *dir)
 		return rc;
 	}
 
-	file = debugfs_create_bool("skip_uevent", 0644, dir,
-			&debug->dp_debug.skip_uevent);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs skip_uevent failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("skip_uevent", 0644, dir, &debug->dp_debug.skip_uevent);
 
-	file = debugfs_create_bool("force_multi_func", 0644, dir,
-			&debug->hpd->force_multi_func);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs force_multi_func failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("force_multi_func", 0644, dir, &debug->hpd->force_multi_func);
 
 	return rc;
 }
@@ -2228,25 +2199,10 @@ static int dp_debug_init_dsc_fec(struct dp_debug_private *debug,
 		struct dentry *dir)
 {
 	int rc = 0;
-	struct dentry *file;
 
-	file = debugfs_create_bool("dsc_feature_enable", 0644, dir,
-			&debug->parser->dsc_feature_enable);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs dsc_feature failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("dsc_feature_enable", 0644, dir, &debug->parser->dsc_feature_enable);
 
-	file = debugfs_create_bool("fec_feature_enable", 0644, dir,
-			&debug->parser->fec_feature_enable);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs fec_feature_enable failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("fec_feature_enable", 0644, dir, &debug->parser->fec_feature_enable);
 
 	return rc;
 }
@@ -2299,25 +2255,10 @@ static int dp_debug_init_feature_toggle(struct dp_debug_private *debug,
 		struct dentry *dir)
 {
 	int rc = 0;
-	struct dentry *file;
 
-	file = debugfs_create_bool("ssc_enable", 0644, dir,
-			&debug->pll->ssc_en);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs ssc_enable failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("ssc_enable", 0644, dir, &debug->pll->ssc_en);
 
-	file = debugfs_create_bool("widebus_mode", 0644, dir,
-			&debug->parser->has_widebus);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs widebus_mode failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+	debugfs_create_bool("widebus_mode", 0644, dir, &debug->parser->has_widebus);
 
 	return rc;
 }
@@ -2326,16 +2267,10 @@ static int dp_debug_init_configs(struct dp_debug_private *debug,
 		struct dentry *dir)
 {
 	int rc = 0;
-	struct dentry *file;
 
-	file = debugfs_create_ulong("connect_notification_delay_ms", 0644, dir,
+	debugfs_create_ulong("connect_notification_delay_ms", 0644, dir,
 		&debug->dp_debug.connect_notification_delay_ms);
-	if (IS_ERR_OR_NULL(file)) {
-		rc = PTR_ERR(file);
-		DP_ERR("[%s] debugfs connect_notification_delay_ms failed, rc=%d\n",
-		       DEBUG_NAME, rc);
-		return rc;
-	}
+
 	debug->dp_debug.connect_notification_delay_ms =
 		DEFAULT_CONNECT_NOTIFICATION_DELAY_MS;
 

+ 41 - 9
msm/dp/dp_debug.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -12,9 +13,40 @@
 #include "dp_aux.h"
 #include "dp_display.h"
 #include "dp_pll.h"
+#include <linux/ipc_logging.h>
+
+#define DP_IPC_LOG(fmt, ...) \
+	do {  \
+		void *ipc_logging_context = get_ipc_log_context(); \
+		ipc_log_string(ipc_logging_context, fmt, ##__VA_ARGS__); \
+	} while (0)
 
 #define DP_DEBUG(fmt, ...)                                                   \
 	do {                                                                 \
+		DP_IPC_LOG("[d][%-4d]"fmt, current->pid, ##__VA_ARGS__); \
+		DP_DEBUG_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_INFO(fmt, ...)                                                   \
+	do {                                                                 \
+		DP_IPC_LOG("[i][%-4d]"fmt, current->pid, ##__VA_ARGS__); \
+		DP_INFO_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_WARN(fmt, ...)                                                   \
+	do {                                                                 \
+		DP_IPC_LOG("[w][%-4d]"fmt, current->pid, ##__VA_ARGS__); \
+		DP_WARN_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_ERR(fmt, ...)                                                   \
+	do {                                                                 \
+		DP_IPC_LOG("[e][%-4d]"fmt, current->pid, ##__VA_ARGS__); \
+		DP_ERR_V(fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define DP_DEBUG_V(fmt, ...) \
+	do { \
 		if (drm_debug_enabled(DRM_UT_KMS))                        \
 			DRM_DEBUG("[msm-dp-debug][%-4d]"fmt, current->pid,   \
 					##__VA_ARGS__);                      \
@@ -23,7 +55,7 @@
 				       current->pid, ##__VA_ARGS__);         \
 	} while (0)
 
-#define DP_INFO(fmt, ...)                                                    \
+#define DP_INFO_V(fmt, ...)                                                    \
 	do {                                                                 \
 		if (drm_debug_enabled(DRM_UT_KMS))                        \
 			DRM_INFO("[msm-dp-info][%-4d]"fmt, current->pid,    \
@@ -33,13 +65,13 @@
 				       current->pid, ##__VA_ARGS__);         \
 	} while (0)
 
-#define DP_WARN(fmt, ...)                                    \
-	pr_warn("[drm:%s][msm-dp-warn][%-4d]"fmt, __func__,  \
-			current->pid, ##__VA_ARGS__)
+#define DP_WARN_V(fmt, ...)                                    \
+		pr_warn("[drm:%s][msm-dp-warn][%-4d]"fmt, __func__,  \
+				current->pid, ##__VA_ARGS__)
 
-#define DP_ERR(fmt, ...)                                    \
-	pr_err("[drm:%s][msm-dp-err][%-4d]"fmt, __func__,   \
-		       current->pid, ##__VA_ARGS__)
+#define DP_ERR_V(fmt, ...)                                    \
+		pr_err("[drm:%s][msm-dp-err][%-4d]"fmt, __func__,   \
+				current->pid, ##__VA_ARGS__)
 
 #define DEFAULT_DISCONNECT_DELAY_MS 0
 #define MAX_DISCONNECT_DELAY_MS 10000
@@ -52,7 +84,7 @@
  * @psm_enabled: specifies whether psm enabled
  * @hdcp_disabled: specifies if hdcp is disabled
  * @hdcp_wait_sink_sync: used to wait for sink synchronization before HDCP auth
- * @tpg_state: specifies whether tpg feature is enabled
+ * @tpg_pattern: selects tpg pattern on the controller
  * @max_pclk_khz: max pclk supported
  * @force_encryption: enable/disable forced encryption for HDCP 2.2
  * @skip_uevent: skip hotplug uevent to the user space
@@ -70,7 +102,7 @@ struct dp_debug {
 	bool psm_enabled;
 	bool hdcp_disabled;
 	bool hdcp_wait_sink_sync;
-	bool tpg_state;
+	u32 tpg_pattern;
 	u32 max_pclk_khz;
 	bool force_encryption;
 	bool skip_uevent;

+ 85 - 55
msm/dp/dp_display.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -14,6 +15,7 @@
 #include <linux/usb/phy.h>
 #include <linux/jiffies.h>
 #include <linux/pm_qos.h>
+#include <linux/ipc_logging.h>
 
 #include "sde_connector.h"
 
@@ -33,6 +35,7 @@
 #include "dp_pll.h"
 #include "sde_dbg.h"
 
+#define DRM_DP_IPC_NUM_PAGES 10
 #define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
 
 #define dp_display_state_show(x) { \
@@ -40,6 +43,11 @@
 		dp_display_state_name(dp->state)); \
 	SDE_EVT32_EXTERNAL(dp->state); }
 
+#define dp_display_state_warn(x) { \
+	DP_WARN("%s: state (0x%x): %s\n", x, dp->state, \
+		dp_display_state_name(dp->state)); \
+	SDE_EVT32_EXTERNAL(dp->state); }
+
 #define dp_display_state_log(x) { \
 	DP_DEBUG("%s: state (0x%x): %s\n", x, dp->state, \
 		dp_display_state_name(dp->state)); \
@@ -195,6 +203,7 @@ struct dp_display_private {
 	struct work_struct attention_work;
 	struct mutex session_lock;
 	bool hdcp_delayed_off;
+	bool no_aux_switch;
 
 	u32 active_stream_cnt;
 	struct dp_mst mst;
@@ -1138,7 +1147,7 @@ static int dp_display_host_ready(struct dp_display_private *dp)
 static void dp_display_host_unready(struct dp_display_private *dp)
 {
 	if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
-		dp_display_state_show("[not initialized]");
+		dp_display_state_warn("[not initialized]");
 		return;
 	}
 
@@ -1435,7 +1444,7 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
 		return -ENODEV;
 	}
 
-	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
+	if (!dp->debug->sim_mode && !dp->no_aux_switch
 	    && !dp->parser->gpio_aux_switch && dp->aux_switch_node) {
 		rc = dp_display_init_aux_switch(dp);
 		if (rc)
@@ -1474,29 +1483,11 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
 	return 0;
 }
 
-static void dp_display_update_dsc_resources(struct dp_display_private *dp,
-		struct dp_panel *panel, bool enable)
+static void dp_display_clear_dsc_resources(struct dp_display_private *dp,
+		struct dp_panel *panel)
 {
-	int rc;
-	u32 dsc_blk_cnt = 0;
-	struct msm_drm_private *priv = dp->priv;
-
-	if (enable) {
-		if (panel->pinfo.comp_info.comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
-				(panel->pinfo.comp_info.enabled)) {
-			rc = msm_get_dsc_count(priv, panel->pinfo.h_active,
-					&dsc_blk_cnt);
-			if (rc) {
-				DP_ERR("error getting dsc count. rc:%d\n", rc);
-				return;
-			}
-		}
-		dp->tot_dsc_blks_in_use += dsc_blk_cnt;
-		panel->dsc_blks_in_use += dsc_blk_cnt;
-	} else {
-		dp->tot_dsc_blks_in_use -= panel->dsc_blks_in_use;
-		panel->dsc_blks_in_use = 0;
-	}
+	dp->tot_dsc_blks_in_use -= panel->dsc_blks_in_use;
+	panel->dsc_blks_in_use = 0;
 }
 
 static int dp_display_stream_pre_disable(struct dp_display_private *dp,
@@ -1528,7 +1519,7 @@ static void dp_display_stream_disable(struct dp_display_private *dp,
 		return;
 	}
 
-	dp_display_update_dsc_resources(dp, dp_panel, false);
+	dp_display_clear_dsc_resources(dp, dp_panel);
 
 	DP_DEBUG("stream_id=%d, active_stream_cnt=%d, tot_dsc_blks_in_use=%d\n",
 			dp_panel->stream_id, dp->active_stream_cnt,
@@ -1677,7 +1668,7 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
 	dp_display_state_remove(DP_STATE_CONFIGURED);
 	mutex_unlock(&dp->session_lock);
 
-	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
+	if (!dp->debug->sim_mode && !dp->no_aux_switch
 	    && !dp->parser->gpio_aux_switch)
 		dp->aux->aux_switch(dp->aux, false, ORIENTATION_NONE);
 
@@ -1693,15 +1684,14 @@ static int dp_display_stream_enable(struct dp_display_private *dp,
 
 	rc = dp->ctrl->stream_on(dp->ctrl, dp_panel);
 
-	if (dp->debug->tpg_state)
-		dp_panel->tpg_config(dp_panel, true);
+	if (dp->debug->tpg_pattern)
+		dp_panel->tpg_config(dp_panel, dp->debug->tpg_pattern);
 
 	if (!rc) {
 		dp->active_panels[dp_panel->stream_id] = dp_panel;
 		dp->active_stream_cnt++;
 	}
 
-	dp_display_update_dsc_resources(dp, dp_panel, true);
 
 	DP_DEBUG("dp active_stream_cnt:%d, tot_dsc_blks_in_use=%d\n",
 			dp->active_stream_cnt, dp->tot_dsc_blks_in_use);
@@ -2041,8 +2031,10 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
 	dp_core_revision = dp_catalog_get_dp_core_version(dp->catalog);
 
 	dp->aux_switch_node = of_parse_phandle(dp->pdev->dev.of_node, phandle, 0);
-	if (!dp->aux_switch_node)
+	if (!dp->aux_switch_node) {
 		DP_DEBUG("cannot parse %s handle\n", phandle);
+		dp->no_aux_switch = true;
+	}
 
 	dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser,
 			dp->aux_switch_node, dp->aux_bridge);
@@ -2053,7 +2045,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
 		goto error_aux;
 	}
 
-	rc = dp->aux->drm_aux_register(dp->aux);
+	rc = dp->aux->drm_aux_register(dp->aux, dp->dp_display.drm_dev);
 	if (rc) {
 		DP_ERR("DRM DP AUX register failed\n");
 		goto error_pll;
@@ -2289,6 +2281,7 @@ static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
 	const u32 num_components = 3, default_bpp = 24;
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel;
+	bool dsc_en = (mode->capabilities & DP_PANEL_CAPS_DSC) ? true : false;
 
 	if (!dp_display || !panel) {
 		DP_ERR("invalid input\n");
@@ -2313,7 +2306,7 @@ static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
 		mode->timing.bpp = default_bpp;
 
 	mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel,
-			mode->timing.bpp, mode->timing.pixel_clk_khz);
+			mode->timing.bpp, mode->timing.pixel_clk_khz, dsc_en);
 
 	dp_panel->pinfo = mode->timing;
 	mutex_unlock(&dp->session_lock);
@@ -2386,10 +2379,10 @@ static int dp_display_prepare(struct dp_display *dp_display, void *panel)
 
 	/*
 	 * If DP_STATE_ENABLED, there is nothing left to do.
-	 * However, this should not happen ideally. So, log this.
+	 * This would happen during MST flow. So, log this.
 	 */
 	if (dp_display_state_is(DP_STATE_ENABLED)) {
-		dp_display_state_show("[already enabled]");
+		dp_display_state_warn("[already enabled]");
 		goto end;
 	}
 
@@ -2581,12 +2574,13 @@ static int dp_display_post_enable(struct dp_display *dp_display, void *panel)
 		dp_panel->audio->lane_count = dp->link->link_params.lane_count;
 		dp_panel->audio->on(dp_panel->audio);
 	}
-end:
-	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 
+	dp->aux->state &= ~DP_STATE_CTRL_POWERED_OFF;
+	dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
 	complete_all(&dp->notification_comp);
-	mutex_unlock(&dp->session_lock);
 	DP_DEBUG("display post enable complete. state: 0x%x\n", dp->state);
+end:
+	mutex_unlock(&dp->session_lock);
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
 	return 0;
 }
@@ -2806,7 +2800,9 @@ static int dp_display_unprepare(struct dp_display *dp_display, void *panel)
 	}
 
 	dp_display_state_remove(DP_STATE_ENABLED);
-	dp->aux->state = DP_STATE_CTRL_POWERED_OFF;
+
+	dp->aux->state &= ~DP_STATE_CTRL_POWERED_ON;
+	dp->aux->state |= DP_STATE_CTRL_POWERED_OFF;
 
 	complete_all(&dp->notification_comp);
 
@@ -2918,7 +2914,7 @@ static int dp_display_validate_topology(struct dp_display_private *dp,
 		return -EPERM;
 	}
 
-	DP_DEBUG("mode %sx%d is valid, supported DP topology lm:%d dsc:%d 3dmux:%d\n",
+	DP_DEBUG_V("mode %sx%d is valid, supported DP topology lm:%d dsc:%d 3dmux:%d\n",
 				mode->name, fps, num_lm, num_dsc, num_3dmux);
 
 	return 0;
@@ -2978,7 +2974,8 @@ static enum drm_mode_status dp_display_validate_mode(
 	mode_status = MODE_OK;
 end:
 	mutex_unlock(&dp->session_lock);
-	DP_DEBUG("[%s] mode is %s\n", mode->name,
+
+	DP_DEBUG_V("[%s] mode is %s\n", mode->name,
 			(mode_status == MODE_OK) ? "valid" : "invalid");
 
 	return mode_status;
@@ -3001,11 +2998,11 @@ static int dp_display_get_available_dp_resources(struct dp_display *dp_display,
 	max_dp_avail_res->num_dsc = min(avail_res->num_dsc,
 			dp_display->max_dsc_count);
 
-	DP_DEBUG("max_lm:%d, avail_lm:%d, dp_avail_lm:%d\n",
+	DP_DEBUG_V("max_lm:%d, avail_lm:%d, dp_avail_lm:%d\n",
 			dp_display->max_mixer_count, avail_res->num_lm,
 			max_dp_avail_res->num_lm);
 
-	DP_DEBUG("max_dsc:%d, avail_dsc:%d, dp_avail_dsc:%d\n",
+	DP_DEBUG_V("max_dsc:%d, avail_dsc:%d, dp_avail_dsc:%d\n",
 			dp_display->max_dsc_count, avail_res->num_dsc,
 			max_dp_avail_res->num_dsc);
 
@@ -3046,7 +3043,7 @@ static void dp_display_convert_to_dp_mode(struct dp_display *dp_display,
 	int rc;
 	struct dp_display_private *dp;
 	struct dp_panel *dp_panel;
-	u32 free_dsc_blks = 0, required_dsc_blks = 0;
+	u32 free_dsc_blks = 0, required_dsc_blks = 0, curr_dsc = 0, new_dsc = 0;
 
 	if (!dp_display || !drm_mode || !dp_mode || !panel) {
 		DP_ERR("invalid input\n");
@@ -3058,26 +3055,38 @@ static void dp_display_convert_to_dp_mode(struct dp_display *dp_display,
 
 	memset(dp_mode, 0, sizeof(*dp_mode));
 
-	free_dsc_blks = dp_display->max_dsc_count -
+	if (dp_panel->dsc_en) {
+		free_dsc_blks = dp_display->max_dsc_count -
 				dp->tot_dsc_blks_in_use +
 				dp_panel->dsc_blks_in_use;
+		DP_DEBUG_V("Before: in_use:%d, max:%d, free:%d\n",
+				dp->tot_dsc_blks_in_use,
+				dp_display->max_dsc_count, free_dsc_blks);
 
-	rc = msm_get_dsc_count(dp->priv, drm_mode->hdisplay,
-			&required_dsc_blks);
-	if (rc) {
-		DP_ERR("error getting dsc count. rc:%d\n", rc);
-		return;
-	}
+		rc = msm_get_dsc_count(dp->priv, drm_mode->hdisplay,
+				&required_dsc_blks);
+		if (rc) {
+			DP_ERR("error getting dsc count. rc:%d\n", rc);
+			return;
+		}
 
-	if (free_dsc_blks >= required_dsc_blks)
-		dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
+		curr_dsc = dp_panel->dsc_blks_in_use;
+		dp->tot_dsc_blks_in_use -= dp_panel->dsc_blks_in_use;
+		dp_panel->dsc_blks_in_use = 0;
+
+		if (free_dsc_blks >= required_dsc_blks) {
+			dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
+			new_dsc = max(curr_dsc, required_dsc_blks);
+			dp_panel->dsc_blks_in_use = new_dsc;
+			dp->tot_dsc_blks_in_use += new_dsc;
+		}
 
-	if (dp_mode->capabilities & DP_PANEL_CAPS_DSC)
-		DP_DEBUG("in_use:%d, max:%d, free:%d, req:%d, caps:0x%x\n",
+		DP_DEBUG_V("After: in_use:%d, max:%d, free:%d, req:%d, caps:0x%x\n",
 				dp->tot_dsc_blks_in_use,
 				dp_display->max_dsc_count,
 				free_dsc_blks, required_dsc_blks,
 				dp_mode->capabilities);
+	}
 
 	dp_panel->convert_to_dp_mode(dp_panel, drm_mode, dp_mode);
 }
@@ -3341,6 +3350,7 @@ static int dp_display_mst_connector_uninstall(struct dp_display *dp_display,
 	struct sde_connector *sde_conn;
 	struct dp_panel *dp_panel;
 	struct dp_display_private *dp;
+	struct dp_audio *audio = NULL;
 
 	if (!dp_display || !connector) {
 		DP_ERR("invalid input\n");
@@ -3366,13 +3376,17 @@ static int dp_display_mst_connector_uninstall(struct dp_display *dp_display,
 	}
 
 	dp_panel = sde_conn->drv_panel;
-	dp_audio_put(dp_panel->audio);
+
+	/* Make a copy of audio structure to call into dp_audio_put later */
+	audio = dp_panel->audio;
 	dp_panel_put(dp_panel);
 
 	DP_MST_DEBUG("dp mst connector uninstalled. conn:%d\n",
 			connector->base.id);
 
 	mutex_unlock(&dp->session_lock);
+
+	dp_audio_put(audio);
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
 
 	return rc;
@@ -3593,6 +3607,10 @@ static int dp_display_probe(struct platform_device *pdev)
 
 	g_dp_display = &dp->dp_display;
 
+	g_dp_display->dp_ipc_log = ipc_log_context_create(DRM_DP_IPC_NUM_PAGES, "drm_dp", 0);
+	if (!g_dp_display->dp_ipc_log)
+		DP_WARN("Error in creating ipc_log_context\n");
+
 	g_dp_display->enable        = dp_display_enable;
 	g_dp_display->post_enable   = dp_display_post_enable;
 	g_dp_display->pre_disable   = dp_display_pre_disable;
@@ -3704,6 +3722,11 @@ static int dp_display_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 	devm_kfree(&pdev->dev, dp);
 
+	if (g_dp_display->dp_ipc_log) {
+		ipc_log_context_destroy(g_dp_display->dp_ipc_log);
+		g_dp_display->dp_ipc_log = NULL;
+	}
+
 	return 0;
 }
 
@@ -3765,6 +3788,13 @@ static void dp_pm_complete(struct device *dev)
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
 }
 
+void *get_ipc_log_context(void)
+{
+	if (g_dp_display && g_dp_display->dp_ipc_log)
+		return g_dp_display->dp_ipc_log;
+	return NULL;
+}
+
 static const struct dev_pm_ops dp_pm_ops = {
 	.prepare = dp_pm_prepare,
 	.complete = dp_pm_complete,

+ 4 - 0
msm/dp/dp_display.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -49,6 +50,7 @@ struct dp_display {
 	void *dp_mst_prv_info;
 	u32 max_mixer_count;
 	u32 max_dsc_count;
+	void *dp_ipc_log;
 
 	int (*enable)(struct dp_display *dp_display, void *panel);
 	int (*post_enable)(struct dp_display *dp_display, void *panel);
@@ -105,6 +107,8 @@ struct dp_display {
 			struct msm_resource_caps_info *max_dp_avail_res);
 };
 
+void *get_ipc_log_context(void);
+
 #if IS_ENABLED(CONFIG_DRM_MSM_DP)
 int dp_display_get_num_of_displays(void);
 int dp_display_get_displays(void **displays, int count);

+ 6 - 0
msm/dp/dp_drm.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -419,6 +420,11 @@ int dp_connector_get_mode_info(struct drm_connector *connector,
 		DP_ERR("error getting mixer count. rc:%d\n", rc);
 		return rc;
 	}
+	/* reset dp connector lm_mask for every connection event and
+	 * this will get re-populated in resource manager based on
+	 * resolution and topology of dp display.
+	 */
+	sde_conn->lm_mask = 0;
 
 	topology->num_enc = no_enc;
 	topology->num_intf = single_intf;

+ 10 - 8
msm/dp/dp_gpio_hpd.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
@@ -218,6 +219,7 @@ struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
 	const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
 	struct dp_gpio_hpd_private *gpio_hpd;
 	struct dp_pinctrl pinctrl = {0};
+	unsigned int gpio;
 
 	if (!dev || !cb) {
 		DP_ERR("invalid device\n");
@@ -225,6 +227,13 @@ struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
 		goto error;
 	}
 
+	gpio = of_get_named_gpio(dev->of_node, hpd_gpio_name, 0);
+	if (!gpio_is_valid(gpio)) {
+		DP_DEBUG("%s gpio not specified\n", hpd_gpio_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
 	gpio_hpd = devm_kzalloc(dev, sizeof(*gpio_hpd), GFP_KERNEL);
 	if (!gpio_hpd) {
 		rc = -ENOMEM;
@@ -245,14 +254,7 @@ struct dp_hpd *dp_gpio_hpd_get(struct device *dev,
 		}
 	}
 
-	gpio_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
-		hpd_gpio_name, 0);
-	if (!gpio_is_valid(gpio_hpd->gpio_cfg.gpio)) {
-		DP_ERR("%s gpio not specified\n", hpd_gpio_name);
-		rc = -EINVAL;
-		goto gpio_error;
-	}
-
+	gpio_hpd->gpio_cfg.gpio = gpio;
 	strlcpy(gpio_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
 		sizeof(gpio_hpd->gpio_cfg.gpio_name));
 	gpio_hpd->gpio_cfg.value = 0;

+ 29 - 30
msm/dp/dp_hpd.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -45,46 +46,43 @@ struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
 		struct dp_aux_bridge *aux_bridge,
 		struct dp_hpd_cb *cb)
 {
-	struct dp_hpd *dp_hpd;
+	struct dp_hpd *dp_hpd = NULL;
 
 	if (aux_bridge && (aux_bridge->flag & DP_AUX_BRIDGE_HPD)) {
 		dp_hpd = dp_bridge_hpd_get(dev, cb, aux_bridge);
-		if (IS_ERR(dp_hpd)) {
-			pr_err("failed to get bridge hpd\n");
-			return dp_hpd;
-		}
-		dp_hpd->type = DP_HPD_BRIDGE;
-	} else if (parser->no_aux_switch && parser->lphw_hpd) {
-		dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
-		if (IS_ERR_OR_NULL(dp_hpd)) {
-			DP_ERR("failed to get lphw hpd\n");
-			return dp_hpd;
+		if (!IS_ERR(dp_hpd)) {
+			dp_hpd->type = DP_HPD_BRIDGE;
+			goto config;
 		}
+	}
+
+	dp_hpd = dp_lphw_hpd_get(dev, parser, catalog, cb);
+	if (!IS_ERR_OR_NULL(dp_hpd)) {
 		dp_hpd->type = DP_HPD_LPHW;
-	} else if (parser->no_aux_switch) {
-		dp_hpd = dp_gpio_hpd_get(dev, cb);
-		if (IS_ERR_OR_NULL(dp_hpd)) {
-			DP_ERR("failed to get gpio hpd\n");
-			return dp_hpd;
-		}
+		goto config;
+	}
+
+	dp_hpd = dp_gpio_hpd_get(dev, cb);
+	if (!IS_ERR_OR_NULL(dp_hpd)) {
 		dp_hpd->type = DP_HPD_GPIO;
-	} else {
-		dp_hpd = dp_altmode_get(dev, cb);
-		if (!IS_ERR_OR_NULL(dp_hpd)) {
-			dp_hpd->type = DP_HPD_ALTMODE;
-			goto config;
-		}
-		DP_WARN("dp_altmode failed (%ld), falling back to dp_usbpd\n",
-				PTR_ERR(dp_hpd));
+		goto config;
+	}
 
-		dp_hpd = dp_usbpd_get(dev, cb);
-		if (IS_ERR_OR_NULL(dp_hpd)) {
-			DP_ERR("failed to get usbpd\n");
-			return dp_hpd;
-		}
+	dp_hpd = dp_altmode_get(dev, cb);
+	if (!IS_ERR_OR_NULL(dp_hpd)) {
+		dp_hpd->type = DP_HPD_ALTMODE;
+		goto config;
+	}
+
+	dp_hpd = dp_usbpd_get(dev, cb);
+	if (!IS_ERR_OR_NULL(dp_hpd)) {
 		dp_hpd->type = DP_HPD_USBPD;
+		goto config;
 	}
 
+	DP_ERR("Failed to detect HPD type\n");
+	goto end;
+
 config:
 	if (!dp_hpd->host_init)
 		dp_hpd->host_init	= dp_hpd_host_init;
@@ -93,6 +91,7 @@ config:
 	if (!dp_hpd->isr)
 		dp_hpd->isr		= dp_hpd_isr;
 
+end:
 	return dp_hpd;
 }
 

+ 12 - 9
msm/dp/dp_lphw_hpd.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
@@ -320,6 +321,7 @@ static void dp_lphw_hpd_init(struct dp_lphw_hpd_private *lphw_hpd)
 			if (rc)
 				DP_ERR("failed to set hpd_active state\n");
 		}
+		pinctrl.state_hpd_tlmm = pinctrl.state_hpd_ctrl = NULL;
 	}
 }
 
@@ -343,7 +345,8 @@ struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
 {
 	int rc = 0;
 	const char *hpd_gpio_name = "qcom,dp-hpd-gpio";
-	struct dp_lphw_hpd_private *lphw_hpd;
+	struct dp_lphw_hpd_private *lphw_hpd = NULL;
+	unsigned int gpio;
 
 	if (!dev || !parser || !cb) {
 		DP_ERR("invalid device\n");
@@ -351,20 +354,20 @@ struct dp_hpd *dp_lphw_hpd_get(struct device *dev, struct dp_parser *parser,
 		goto error;
 	}
 
+	gpio = of_get_named_gpio(dev->of_node, hpd_gpio_name, 0);
+	if (!gpio_is_valid(gpio)) {
+		DP_DEBUG("%s gpio not specified\n", hpd_gpio_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
 	lphw_hpd = devm_kzalloc(dev, sizeof(*lphw_hpd), GFP_KERNEL);
 	if (!lphw_hpd) {
 		rc = -ENOMEM;
 		goto error;
 	}
 
-	lphw_hpd->gpio_cfg.gpio = of_get_named_gpio(dev->of_node,
-		hpd_gpio_name, 0);
-	if (!gpio_is_valid(lphw_hpd->gpio_cfg.gpio)) {
-		DP_ERR("%s gpio not specified\n", hpd_gpio_name);
-		rc = -EINVAL;
-		goto gpio_error;
-	}
-
+	lphw_hpd->gpio_cfg.gpio = gpio;
 	strlcpy(lphw_hpd->gpio_cfg.gpio_name, hpd_gpio_name,
 		sizeof(lphw_hpd->gpio_cfg.gpio_name));
 	lphw_hpd->gpio_cfg.value = 0;

+ 36 - 32
msm/dp/dp_mst_drm.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -43,9 +44,12 @@
 #include "sde_connector.h"
 #include "dp_drm.h"
 #include "dp_debug.h"
+#include "dp_parser.h"
 
 #define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
-#define DP_MST_INFO(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO(fmt, ...) DP_INFO(fmt, ##__VA_ARGS__)
+#define DP_MST_DEBUG_V(fmt, ...) DP_DEBUG_V(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_V(fmt, ...) DP_INFO_V(fmt, ##__VA_ARGS__)
 
 #define MAX_DP_MST_DRM_ENCODERS		2
 #define MAX_DP_MST_DRM_BRIDGES		2
@@ -248,7 +252,7 @@ static int dp_mst_calc_pbn_mode(struct dp_display_mode *dp_mode)
 	pbn = drm_dp_calc_pbn_mode(dp_mode->timing.pixel_clk_khz, bpp, false);
 	pbn_fp = drm_fixp_from_fraction(pbn, 1);
 
-	DP_DEBUG("before overhead pbn:%d, bpp:%d\n", pbn, bpp);
+	DP_DEBUG_V("before overhead pbn:%d, bpp:%d\n", pbn, bpp);
 
 	if (dsc_en)
 		pbn_fp = drm_fixp_mul(pbn_fp, dp_mode->dsc_overhead_fp);
@@ -258,7 +262,7 @@ static int dp_mst_calc_pbn_mode(struct dp_display_mode *dp_mode)
 
 	pbn = drm_fixp2int(pbn_fp);
 
-	DP_DEBUG("after overhead pbn:%d, bpp:%d\n", pbn, bpp);
+	DP_DEBUG_V("after overhead pbn:%d, bpp:%d\n", pbn, bpp);
 	return pbn;
 }
 
@@ -286,7 +290,7 @@ static int dp_mst_bridge_attach(struct drm_bridge *dp_bridge,
 {
 	struct dp_mst_bridge *bridge;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	if (!dp_bridge) {
@@ -312,7 +316,7 @@ static bool dp_mst_bridge_mode_fixup(struct drm_bridge *drm_bridge,
 	struct drm_crtc_state *crtc_state;
 	struct dp_mst_bridge_state *bridge_state;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 
 	if (!drm_bridge || !mode || !adjusted_mode) {
 		DP_ERR("Invalid params\n");
@@ -355,7 +359,7 @@ static int _dp_mst_compute_config(struct drm_atomic_state *state,
 	int slots = 0, pbn;
 	struct sde_connector *c_conn = to_sde_connector(connector);
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id);
 
 	pbn = mst->mst_fw_cbs->calc_pbn_mode(mode);
@@ -443,7 +447,7 @@ static void _dp_mst_bridge_pre_enable_part1(struct dp_mst_bridge *dp_bridge)
 	bool ret;
 	int pbn, slots;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge));
 
 	/* skip mst specific disable operations during suspend */
@@ -483,7 +487,7 @@ static void _dp_mst_bridge_pre_enable_part2(struct dp_mst_bridge *dp_bridge)
 	struct dp_display *dp_display = dp_bridge->display;
 	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge));
 
 	/* skip mst specific disable operations during suspend */
@@ -506,7 +510,7 @@ static void _dp_mst_bridge_pre_disable_part1(struct dp_mst_bridge *dp_bridge)
 	struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
 	struct drm_dp_mst_port *port = c_conn->mst_port;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(dp_bridge));
 
 	/* skip mst specific disable operations during suspend */
@@ -533,7 +537,7 @@ static void _dp_mst_bridge_pre_disable_part2(struct dp_mst_bridge *dp_bridge)
 		to_sde_connector(dp_bridge->connector);
 	struct drm_dp_mst_port *port = c_conn->mst_port;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY,  DP_MST_CONN_ID(dp_bridge));
 
 	/* skip mst specific disable operations during suspend */
@@ -570,7 +574,7 @@ static void dp_mst_bridge_pre_enable(struct drm_bridge *drm_bridge)
 		return;
 	}
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 
 	bridge = to_dp_mst_bridge(drm_bridge);
 	dp = bridge->display;
@@ -640,7 +644,7 @@ static void dp_mst_bridge_enable(struct drm_bridge *drm_bridge)
 		return;
 	}
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge));
 
 	dp = bridge->display;
@@ -669,7 +673,7 @@ static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
 		return;
 	}
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 
 	bridge = to_dp_mst_bridge(drm_bridge);
 	if (!bridge->connector) {
@@ -719,7 +723,7 @@ static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge)
 		return;
 	}
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, DP_MST_CONN_ID(bridge));
 
 	dp = bridge->display;
@@ -751,7 +755,7 @@ static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
 	struct dp_mst_bridge_state *dp_bridge_state;
 	struct dp_display *dp;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 
 	if (!drm_bridge || !mode || !adjusted_mode) {
 		DP_ERR("Invalid params\n");
@@ -902,7 +906,7 @@ dp_mst_connector_detect(struct drm_connector *connector,
 	struct dp_panel *dp_panel;
 	enum drm_connector_status status;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	dp_panel = c_conn->drv_panel;
@@ -925,7 +929,7 @@ void dp_mst_clear_edid_cache(void *dp_display) {
 	struct drm_connector *conn;
 	struct sde_connector *c_conn;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	if (!dp) {
@@ -947,7 +951,7 @@ void dp_mst_clear_edid_cache(void *dp_display) {
 
 	drm_connector_list_iter_end(&conn_iter);
 
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_DEBUG_V("exit:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
 }
 
@@ -960,7 +964,7 @@ static int dp_mst_connector_get_modes(struct drm_connector *connector,
 	int rc = 0;
 	struct edid *edid = NULL;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id);
 
 	mutex_lock(&mst->edid_lock);
@@ -999,7 +1003,7 @@ duplicate_edid:
 			connector, edid);
 
 end:
-	DP_MST_DEBUG("exit: id: %d rc: %d\n", connector->base.id, rc);
+	DP_MST_DEBUG_V("exit: id: %d rc: %d\n", connector->base.id, rc);
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, rc);
 
 	return rc;
@@ -1087,16 +1091,16 @@ int dp_mst_connector_get_mode_info(struct drm_connector *connector,
 {
 	int rc;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id);
 
 	rc = dp_connector_get_mode_info(connector, drm_mode, NULL, mode_info,
 			display, avail_res);
 
-	DP_MST_DEBUG("mst connector:%d get mode info. rc:%d\n",
+	DP_MST_DEBUG_V("mst connector:%d get mode info. rc:%d\n",
 			connector->base.id, rc);
 
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_DEBUG_V("exit:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id);
 
 	return rc;
@@ -1171,7 +1175,7 @@ static int dp_mst_connector_atomic_check(struct drm_connector *connector,
 	struct sde_connector *c_conn = to_sde_connector(connector);
 	struct dp_display_mode dp_mode;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id);
 
 	if (!state)
@@ -1306,7 +1310,7 @@ static int dp_mst_connector_config_hdr(struct drm_connector *connector,
 {
 	int rc;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, connector->base.id);
 
 	rc = dp_connector_config_hdr(connector, display, c_state);
@@ -1314,7 +1318,7 @@ static int dp_mst_connector_config_hdr(struct drm_connector *connector,
 	DP_MST_DEBUG("mst connector:%d cfg hdr. rc:%d\n",
 			connector->base.id, rc);
 
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_DEBUG_V("exit:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, connector->base.id, rc);
 
 	return rc;
@@ -1327,7 +1331,7 @@ static void dp_mst_connector_pre_destroy(struct drm_connector *connector,
 	struct sde_connector *c_conn = to_sde_connector(connector);
 	u32 conn_id = connector->base.id;
 
-	DP_MST_DEBUG("enter:\n");
+	DP_MST_DEBUG_V("enter:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, conn_id);
 
 	kfree(c_conn->cached_edid);
@@ -1336,7 +1340,7 @@ static void dp_mst_connector_pre_destroy(struct drm_connector *connector,
 	drm_dp_mst_put_port_malloc(c_conn->mst_port);
 
 	dp_display->mst_connector_uninstall(dp_display, connector);
-	DP_MST_DEBUG("exit:\n");
+	DP_MST_DEBUG_V("exit:\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, conn_id);
 }
 
@@ -1382,7 +1386,7 @@ dp_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct sde_connector *c_conn;
 	int rc, i;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
@@ -1587,7 +1591,7 @@ dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
 	struct drm_connector *connector;
 	int i, enc_idx;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
@@ -1652,7 +1656,7 @@ dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
 	struct drm_connector *connector;
 	int rc;
 
-	DP_MST_DEBUG("enter\n");
+	DP_MST_DEBUG_V("enter\n");
 	SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
 
 	dev = dp_display->drm_dev;
@@ -1875,7 +1879,7 @@ int dp_mst_init(struct dp_display *dp_display)
 					dp_mst.caps.drm_aux,
 					dp_mst.caps.max_dpcd_transaction_bytes,
 					dp_mst.caps.max_streams_supported,
-					4, DP_LINK_BW_8_1, conn_base_id);
+					4, DP_MAX_LINK_CLK_KHZ, conn_base_id);
 #else
 	ret = drm_dp_mst_topology_mgr_init(&dp_mst.mst_mgr, dev,
 					dp_mst.caps.drm_aux,

+ 1 - 62
msm/dp/dp_mst_sim.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -86,35 +87,6 @@ static const struct dp_mst_sim_port output_port = {
 	0, 0, 2520, 2520, NULL, 0
 };
 
-#ifdef CONFIG_DYNAMIC_DEBUG
-static void dp_sim_aux_hex_dump(struct drm_dp_aux_msg *msg)
-{
-	char prefix[64];
-	int i, linelen, remaining = msg->size;
-	const int rowsize = 16;
-	u8 linebuf[64];
-
-	snprintf(prefix, sizeof(prefix), "%s %s %4xh(%2zu): ",
-		(msg->request & DP_AUX_I2C_MOT) ? "I2C" : "NAT",
-		(msg->request & DP_AUX_I2C_READ) ? "RD" : "WR",
-		msg->address, msg->size);
-
-	for (i = 0; i < msg->size; i += rowsize) {
-		linelen = min(remaining, rowsize);
-		remaining -= rowsize;
-
-		hex_dump_to_buffer(msg->buffer + i, linelen, rowsize, 1,
-			linebuf, sizeof(linebuf), false);
-
-		DP_DEBUG("%s%s\n", prefix, linebuf);
-	}
-}
-#else
-static void dp_sim_aux_hex_dump(struct drm_dp_aux_msg *msg)
-{
-}
-#endif
-
 static int dp_sim_register_hpd(struct dp_aux_bridge *bridge,
 	int (*hpd_cb)(void *, bool, bool), void *dev)
 {
@@ -333,8 +305,6 @@ static ssize_t dp_sim_transfer(struct dp_aux_bridge *bridge,
 		ret = drm_aux->transfer(drm_aux, msg);
 
 end:
-	dp_sim_aux_hex_dump(msg);
-
 	mutex_unlock(&sim_dev->lock);
 
 	return ret;
@@ -1683,34 +1653,3 @@ int dp_sim_remove(struct platform_device *pdev)
 
 	return 0;
 }
-
-#if 0
-static const struct of_device_id dt_match[] = {
-	{ .compatible = "qcom,dp-mst-sim"},
-	{},
-};
-
-static struct platform_driver dp_sim_driver = {
-	.probe = dp_sim_probe,
-	.remove = dp_sim_remove,
-	.driver = {
-		.name = "dp_sim",
-		.of_match_table = dt_match,
-		.suppress_bind_attrs = true,
-	},
-};
-
-static int __init dp_sim_register(void)
-{
-	return platform_driver_register(&dp_sim_driver);
-}
-
-static void __exit dp_sim_unregister(void)
-{
-	platform_driver_unregister(&dp_sim_driver);
-}
-
-module_init(dp_sim_register);
-module_exit(dp_sim_unregister);
-
-#endif

+ 7 - 4
msm/dp/dp_mst_sim_helper.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -44,7 +45,9 @@
 #include "dp_debug.h"
 
 #define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
-#define DP_MST_INFO(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO(fmt, ...) DP_INFO(fmt, ##__VA_ARGS__)
+#define DP_MST_DEBUG_V(fmt, ...) DP_DEBUG_V(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_V(fmt, ...) DP_INFO_V(fmt, ##__VA_ARGS__)
 
 #define DDC_SEGMENT_ADDR 0x30
 
@@ -84,7 +87,7 @@ struct dp_mst_notify_work {
 	u32 port_mask;
 };
 
-#ifdef CONFIG_DYNAMIC_DEBUG
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
 static void dp_sideband_hex_dump(const char *name,
 		u32 address, u8 *buffer, size_t size)
 {
@@ -103,7 +106,7 @@ static void dp_sideband_hex_dump(const char *name,
 		hex_dump_to_buffer(buffer + i, linelen, rowsize, 1,
 			linebuf, sizeof(linebuf), false);
 
-		DP_MST_DEBUG("%s%s\n", prefix, linebuf);
+		DP_MST_DEBUG_V("%s%s\n", prefix, linebuf);
 	}
 }
 #else
@@ -111,7 +114,7 @@ static void dp_sideband_hex_dump(const char *name,
 		u32 address, u8 *buffer, size_t size)
 {
 }
-#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
 
 static u8 dp_mst_sim_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
 {

+ 19 - 17
msm/dp/dp_panel.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -435,7 +436,7 @@ static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
 	tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
 
 	if (dsc_num_bytes == 0)
-		DP_DEBUG("incorrect no of bytes per slice=%d\n", dsc_num_bytes);
+		DP_WARN("incorrect no of bytes per slice=%d\n", dsc_num_bytes);
 
 	dwidth_dsc_bytes = (tot_num_hor_bytes +
 				tot_num_eoc_symbols +
@@ -1112,7 +1113,7 @@ static void dp_panel_calc_tu_parameters(struct dp_panel *dp_panel,
 	in.nlanes = panel->link->link_params.lane_count;
 	in.bpp = pinfo->bpp;
 	in.pixel_enc = 444;
-	in.dsc_en = dp_panel->dsc_en;
+	in.dsc_en = pinfo->comp_info.enabled;
 	in.async_en = 0;
 	in.fec_en = dp_panel->fec_en;
 	in.num_of_dsc_slices = pinfo->comp_info.dsc_info.slice_per_pkt;
@@ -1906,7 +1907,7 @@ end:
 }
 
 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
-		u32 mode_edid_bpp, u32 mode_pclk_khz)
+		u32 mode_edid_bpp, u32 mode_pclk_khz, bool dsc_en)
 {
 	struct dp_link_params *link_params;
 	struct dp_panel_private *panel;
@@ -1917,7 +1918,7 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	if (dp_panel->dsc_en)
+	if (dsc_en)
 		min_supported_bpp = 24;
 
 	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
@@ -1933,7 +1934,7 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 	link_bitrate = drm_fixp2int(rate_fp);
 
 	for (; bpp > min_supported_bpp; bpp -= 6) {
-		if (dp_panel->dsc_en) {
+		if (dsc_en) {
 			if (bpp == 30 && !(dp_panel->sink_dsc_caps.color_depth & DP_DSC_10_BPC))
 				continue;
 			else if (bpp == 24 && !(dp_panel->sink_dsc_caps.color_depth & DP_DSC_8_BPC))
@@ -1951,14 +1952,14 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 	if (bpp < min_supported_bpp)
 		DP_ERR("bpp %d is below minimum supported bpp %d\n", bpp,
 				min_supported_bpp);
-	if (dp_panel->dsc_en && bpp != 24 && bpp != 30 && bpp != 36)
+	if (dsc_en && bpp != 24 && bpp != 30 && bpp != 36)
 		DP_ERR("bpp %d is not supported when dsc is enabled\n", bpp);
 
 	return bpp;
 }
 
 static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
-		u32 mode_edid_bpp, u32 mode_pclk_khz)
+		u32 mode_edid_bpp, u32 mode_pclk_khz, bool dsc_en)
 {
 	struct dp_panel_private *panel;
 	u32 bpp = mode_edid_bpp;
@@ -1975,7 +1976,7 @@ static u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
 				panel->link->test_video.test_bit_depth);
 	else
 		bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
-				mode_pclk_khz);
+				mode_pclk_khz, dsc_en);
 
 	return bpp;
 }
@@ -2074,7 +2075,7 @@ static void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
 	}
 }
 
-static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+static void dp_panel_tpg_config(struct dp_panel *dp_panel, u32 pattern)
 {
 	u32 hsync_start_x, hsync_end_x, hactive;
 	struct dp_catalog_panel *catalog;
@@ -2100,8 +2101,8 @@ static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
 		return;
 	}
 
-	if (!enable) {
-		panel->catalog->tpg_config(catalog, false);
+	if (!pattern) {
+		panel->catalog->tpg_config(catalog, pattern);
 		return;
 	}
 
@@ -2132,7 +2133,7 @@ static void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
 			pinfo->h_sync_width;
 	catalog->display_hctl = (hsync_end_x << 16) | hsync_start_x;
 
-	panel->catalog->tpg_config(catalog, true);
+	panel->catalog->tpg_config(catalog, pattern);
 }
 
 static int dp_panel_config_timing(struct dp_panel *dp_panel)
@@ -2729,9 +2730,11 @@ static void dp_panel_config_ctrl(struct dp_panel *dp_panel)
 	u8 *dpcd = dp_panel->dpcd;
 	struct dp_panel_private *panel;
 	struct dp_catalog_panel *catalog;
+	struct msm_compression_info *comp_info;
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 	catalog = panel->catalog;
+	comp_info = &dp_panel->pinfo.comp_info;
 
 	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
 	config |= (0 << 11); /* RGB */
@@ -2739,7 +2742,7 @@ static void dp_panel_config_ctrl(struct dp_panel *dp_panel)
 	tbd = panel->link->get_test_bits_depth(panel->link,
 			dp_panel->pinfo.bpp);
 
-	if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN || dp_panel->dsc_en)
+	if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN || comp_info->enabled)
 		tbd = (DP_TEST_BIT_DEPTH_8 >> DP_TEST_BIT_DEPTH_SHIFT);
 
 	config |= tbd << 8;
@@ -2949,8 +2952,7 @@ static void dp_panel_convert_to_dp_mode(struct dp_panel *dp_panel,
 {
 	const u32 num_components = 3, default_bpp = 24;
 	struct msm_compression_info *comp_info;
-	bool dsc_cap = (dp_mode->capabilities & DP_PANEL_CAPS_DSC) ?
-				true : false;
+	bool dsc_en = (dp_mode->capabilities & DP_PANEL_CAPS_DSC) ? true : false;
 	int rc;
 
 	dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -3009,9 +3011,9 @@ static void dp_panel_convert_to_dp_mode(struct dp_panel *dp_panel,
 	}
 
 	dp_mode->timing.bpp = dp_panel_get_mode_bpp(dp_panel,
-			dp_mode->timing.bpp, dp_mode->timing.pixel_clk_khz);
+			dp_mode->timing.bpp, dp_mode->timing.pixel_clk_khz, dsc_en);
 
-	if (dp_panel->dsc_en && dsc_cap) {
+	if (dsc_en) {
 		if (dp_panel_dsc_prepare_basic_params(comp_info,
 					dp_mode, dp_panel)) {
 			DP_DEBUG("prepare DSC basic params failed\n");

+ 3 - 2
msm/dp/dp_panel.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -167,7 +168,7 @@ struct dp_panel {
 	int (*read_sink_caps)(struct dp_panel *dp_panel,
 		struct drm_connector *connector, bool multi_func);
 	u32 (*get_mode_bpp)(struct dp_panel *dp_panel, u32 mode_max_bpp,
-			u32 mode_pclk_khz);
+			u32 mode_pclk_khz, bool dsc_en);
 	int (*get_modes)(struct dp_panel *dp_panel,
 		struct drm_connector *connector, struct dp_display_mode *mode);
 	void (*handle_sink_request)(struct dp_panel *dp_panel);
@@ -176,7 +177,7 @@ struct dp_panel {
 			bool dhdr_update, u64 core_clk_rate, bool flush);
 	int (*set_colorspace)(struct dp_panel *dp_panel,
 		u32 colorspace);
-	void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
+	void (*tpg_config)(struct dp_panel *dp_panel, u32 pattern);
 	int (*spd_config)(struct dp_panel *dp_panel);
 	bool (*hdr_supported)(struct dp_panel *dp_panel);
 

+ 1 - 24
msm/dp/dp_parser.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -205,23 +206,6 @@ static int dp_parser_pinctrl(struct dp_parser *parser)
 		goto error;
 	}
 
-	if (parser->no_aux_switch && parser->lphw_hpd) {
-		pinctrl->state_hpd_tlmm = pinctrl->state_hpd_ctrl = NULL;
-
-		pinctrl->state_hpd_tlmm = pinctrl_lookup_state(pinctrl->pin,
-					"mdss_dp_hpd_tlmm");
-		if (!IS_ERR_OR_NULL(pinctrl->state_hpd_tlmm)) {
-			pinctrl->state_hpd_ctrl = pinctrl_lookup_state(
-				pinctrl->pin, "mdss_dp_hpd_ctrl");
-		}
-
-		if (!pinctrl->state_hpd_tlmm || !pinctrl->state_hpd_ctrl) {
-			pinctrl->state_hpd_tlmm = NULL;
-			pinctrl->state_hpd_ctrl = NULL;
-			DP_DEBUG("tlmm or ctrl pinctrl state does not exist\n");
-		}
-	}
-
 	pinctrl->state_active = pinctrl_lookup_state(pinctrl->pin,
 					"mdss_dp_active");
 	if (IS_ERR_OR_NULL(pinctrl->state_active)) {
@@ -253,13 +237,6 @@ static int dp_parser_gpio(struct dp_parser *parser)
 		"qcom,usbplug-cc-gpio",
 	};
 
-	if (of_find_property(of_node, "qcom,dp-hpd-gpio", NULL)) {
-		parser->no_aux_switch = true;
-		parser->lphw_hpd = of_find_property(of_node,
-				"qcom,dp-low-power-hw-hpd", NULL);
-		return 0;
-	}
-
 	if (of_find_property(of_node, "qcom,dp-gpio-aux-switch", NULL))
 		parser->gpio_aux_switch = true;
 	mp->gpio_config = devm_kzalloc(dev,

+ 3 - 3
msm/dp/dp_parser.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -131,12 +132,14 @@ enum dp_phy_aux_config_type {
  * enum dp_phy_version - version of the dp phy
  * @DP_PHY_VERSION_UNKNOWN: Unknown controller version
  * @DP_PHY_VERSION_4_2_0:   DP phy v4.2.0 controller
+ * @DP_PHY_VERSION_6_0_0:   DP phy v6.0.0 controller
  * @DP_PHY_VERSION_MAX:     max version
  */
 enum dp_phy_version {
 	DP_PHY_VERSION_UNKNOWN,
 	DP_PHY_VERSION_2_0_0 = 0x200,
 	DP_PHY_VERSION_4_2_0 = 0x420,
+	DP_PHY_VERSION_6_0_0 = 0x600,
 	DP_PHY_VERSION_MAX
 };
 
@@ -191,7 +194,6 @@ static inline char *dp_phy_aux_config_type_to_string(u32 cfg_type)
  * @hw_cfg: DP HW specific settings
  * @has_mst: MST feature enable status
  * @has_mst_sideband: MST sideband feature enable status
- * @no_aux_switch: presence AUX switch status
  * @gpio_aux_switch: presence GPIO AUX switch status
  * @dsc_feature_enable: DSC feature enable status
  * @fec_feature_enable: FEC feature enable status
@@ -221,13 +223,11 @@ struct dp_parser {
 	struct dp_hw_cfg hw_cfg;
 	bool has_mst;
 	bool has_mst_sideband;
-	bool no_aux_switch;
 	bool dsc_feature_enable;
 	bool fec_feature_enable;
 	bool dsc_continuous_pps;
 	bool has_widebus;
 	bool gpio_aux_switch;
-	bool lphw_hpd;
 	u32 mst_fixed_port[MAX_DP_MST_STREAMS];
 	u32 qos_cpu_mask;
 	unsigned long qos_cpu_latency;

+ 5 - 0
msm/dp/dp_pll.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/err.h>
@@ -55,6 +56,7 @@ static int dp_pll_clock_register(struct dp_pll *pll)
 		rc = dp_pll_clock_register_5nm(pll);
 		break;
 	case DP_PLL_4NM_V1:
+	case DP_PLL_4NM_V1_1:
 		rc = dp_pll_clock_register_4nm(pll);
 		break;
 	default:
@@ -73,6 +75,7 @@ static void dp_pll_clock_unregister(struct dp_pll *pll)
 		dp_pll_clock_unregister_5nm(pll);
 		break;
 	case DP_PLL_4NM_V1:
+	case DP_PLL_4NM_V1_1:
 		dp_pll_clock_unregister_4nm(pll);
 		break;
 	default:
@@ -139,6 +142,8 @@ struct dp_pll *dp_pll_get(struct dp_pll_in *in)
 			pll->revision = DP_PLL_5NM_V2;
 		} else if (!strcmp(label, "4nm-v1")) {
 			pll->revision = DP_PLL_4NM_V1;
+		} else if (!strcmp(label, "4nm-v1.1")) {
+			pll->revision = DP_PLL_4NM_V1_1;
 		} else {
 			DP_ERR("Unsupported pll revision\n");
 			rc = -ENOTSUPP;

+ 33 - 12
msm/dp/dp_pll.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -16,6 +17,7 @@
 #define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
 #define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
 #define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
+#define DP_PHY_VCO_DIV				0x0070
 
 #define dp_pll_get_base(x) pll->io.x->io.base
 
@@ -35,6 +37,15 @@ enum dp_pll_revision {
 	DP_PLL_5NM_V1,
 	DP_PLL_5NM_V2,
 	DP_PLL_4NM_V1,
+	DP_PLL_4NM_V1_1,
+};
+
+enum hsclk_rate {
+	HSCLK_RATE_1620MHZ,
+	HSCLK_RATE_2700MHZ,
+	HSCLK_RATE_5400MHZ,
+	HSCLK_RATE_8100MHZ,
+	HSCLK_RATE_MAX,
 };
 
 static inline const char *dp_pll_get_revision(enum dp_pll_revision rev)
@@ -44,6 +55,7 @@ static inline const char *dp_pll_get_revision(enum dp_pll_revision rev)
 	case DP_PLL_5NM_V1:	return "DP_PLL_5NM_V1";
 	case DP_PLL_5NM_V2:	return "DP_PLL_5NM_V2";
 	case DP_PLL_4NM_V1:	return "DP_PLL_4NM_V1";
+	case DP_PLL_4NM_V1_1:	return "DP_PLL_4NM_V1_1";
 	default:		return "???";
 	}
 }
@@ -89,31 +101,40 @@ struct dp_pll {
 	int (*pll_unprepare)(struct dp_pll *pll);
 };
 
-struct dp_pll_db {
-	struct dp_pll *pll;
-
-	/* lane and orientation settings */
-	u8 lane_cnt;
-	u8 orientation;
-
+struct dp_pll_params {
 	/* COM PHY settings */
 	u32 hsclk_sel;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 lock_cmp_en;
+	/* PHY vco divider */
+	u32 phy_vco_div;
 	u32 dec_start_mode0;
 	u32 div_frac_start1_mode0;
 	u32 div_frac_start2_mode0;
 	u32 div_frac_start3_mode0;
-	u32 integloop_gain0_mode0;
-	u32 integloop_gain1_mode0;
 	u32 lock_cmp1_mode0;
 	u32 lock_cmp2_mode0;
-	u32 lock_cmp_en;
 	u32 ssc_step_size1_mode0;
 	u32 ssc_step_size2_mode0;
 	u32 ssc_per1;
+	u32 ssc_per2;
 	u32 cmp_code1_mode0;
 	u32 cmp_code2_mode0;
-	/* PHY vco divider */
-	u32 phy_vco_div;
+	u32 pll_ivco;
+	u32 bg_timer;
+	u32 core_clk_en;
+	u32 lane_offset_tx;
+	u32 lane_offset_rx;
+};
+
+struct dp_pll_db {
+	struct dp_pll *pll;
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+	u32 rate_idx;
+	const struct dp_pll_params *pll_params;
 };
 
 static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)

+ 98 - 122
msm/dp/dp_pll_4nm.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 /*
@@ -36,7 +36,6 @@
 #define DP_PHY_AUX_CFG1				0x0024
 #define DP_PHY_AUX_CFG2				0x0028
 
-#define DP_PHY_VCO_DIV				0x0070
 #define DP_PHY_TX0_TX1_LANE_CTL			0x0078
 #define DP_PHY_TX2_TX3_LANE_CTL			0x009C
 
@@ -129,14 +128,35 @@
 #define DP_4NM_PHY_READY	BIT(1)
 #define DP_4NM_TSYNC_DONE	BIT(0)
 
-static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
+static const struct dp_pll_params pll_params_v1[HSCLK_RATE_MAX] = {
+	{0x05, 0x3f, 0x00, 0x04, 0x01, 0x69, 0x00, 0x80, 0x07, 0x6f, 0x08, 0x45, 0x06, 0x36, 0x01,
+		0xe2, 0x18, 0x0f, 0x0e, 0x1f, 0x0a, 0x11},
+	{0x03, 0x3f, 0x00, 0x08, 0x01, 0x69, 0x00, 0x80, 0x07, 0x0f, 0x0e, 0x13, 0x06, 0x40, 0x01,
+		0xe2, 0x18, 0x0f, 0x0e, 0x1f, 0x0a, 0x11},
+	{0x01, 0x3f, 0x00, 0x08, 0x02, 0x8c, 0x00, 0x00, 0x0a, 0x1f, 0x1c, 0x1a, 0x08, 0x40, 0x01,
+		0x2e, 0x21, 0x0f, 0x0e, 0x1f, 0x0a, 0x11},
+	{0x00, 0x3f, 0x00, 0x08, 0x00, 0x69, 0x00, 0x80, 0x07, 0x2f, 0x2a, 0x13, 0x06, 0x40, 0x01,
+		0xe2, 0x18, 0x0f, 0x0e, 0x1f, 0x0a, 0x11},
+};
+
+static const struct dp_pll_params pll_params_v1_1[HSCLK_RATE_MAX] = {
+	{0x05, 0x3f, 0x00, 0x04, 0x01, 0x34, 0x00, 0xc0, 0x0b, 0x37, 0x04, 0x92, 0x01, 0x6b, 0x02,
+		0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c},
+	{0x03, 0x3f, 0x00, 0x08, 0x01, 0x34, 0x00, 0xc0, 0x0b, 0x07, 0x07, 0x92, 0x01, 0x6b, 0x02,
+		0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c},
+	{0x01, 0x3f, 0x00, 0x08, 0x02, 0x46, 0x00, 0x00, 0x05, 0x0f, 0x0e, 0x18, 0x02, 0x6b, 0x02,
+		0x97, 0x10, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c},
+	{0x00, 0x3f, 0x00, 0x08, 0x00, 0x34, 0x00, 0xc0, 0x0b, 0x17, 0x15, 0x92, 0x01, 0x6b, 0x02,
+		0x71, 0x0c, 0x0f, 0x0a, 0x0f, 0x0c, 0x0c}
+
+};
+
+static int set_vco_div(struct dp_pll *pll, unsigned long rate)
 {
-	u32 val = 0;
+	u32 div, val;
 
-	if (!pll) {
-		DP_ERR("invalid input parameters\n");
+	if (!pll)
 		return -EINVAL;
-	}
 
 	if (is_gdsc_disabled(pll))
 		return -EINVAL;
@@ -144,22 +164,22 @@ static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
 	val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV);
 	val &= ~0x03;
 
-	switch (div) {
-	case 2:
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		div = 2;
 		val |= 1;
 		break;
-	case 4:
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		div = 4;
 		val |= 2;
 		break;
-	case 6:
-	/* When div = 6, val is 0, so do nothing here */
-		;
-		break;
-	case 8:
-		val |= 3;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		div = 6;
+		/* val = 0 for this case, so no update needed */
 		break;
 	default:
-		DP_DEBUG("unsupported div value %d\n", div);
+		/* No other link rates are supported */
 		return -EINVAL;
 	}
 
@@ -167,27 +187,16 @@ static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
 	/* Make sure the PHY registers writes are done */
 	wmb();
 
-	DP_DEBUG("val=%d div=%x\n", val, div);
-	return 0;
-}
-
-static int set_vco_div(struct dp_pll *pll, unsigned long rate)
-{
-	int div;
-	int rc = 0;
+	/*
+	 * Set the rate for the link and pixel clock sources so that the
+	 * linux clock framework can appropriately compute the MND values
+	 * whenever the pixel clock rate is set.
+	 */
+	clk_set_rate(pll->clk_data->clks[0], pll->vco_rate / 10);
+	clk_set_rate(pll->clk_data->clks[1], pll->vco_rate / div);
 
-	if (rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
-		div = 6;
-	else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		div = 4;
-	else
-		div = 2;
-
-	rc = dp_vco_clk_set_div(pll, div);
-	if (rc < 0) {
-		DP_DEBUG("set vco div failed\n");
-		return rc;
-	}
+	DP_DEBUG("val=%#x div=%x link_clk rate=%lu vco_div_clk rate=%lu\n",
+			val, div, pll->vco_rate / 10, pll->vco_rate / div);
 
 	return 0;
 }
@@ -205,74 +214,22 @@ static int dp_vco_pll_init_db_4nm(struct dp_pll_db *pdb,
 	DP_DEBUG("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
 			spare_value, pdb->lane_cnt, pdb->orientation);
 
-	pdb->div_frac_start1_mode0 = 0x00;
-	pdb->integloop_gain0_mode0 = 0x3f;
-	pdb->integloop_gain1_mode0 = 0x00;
-
 	switch (rate) {
 	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
-		pdb->hsclk_sel = 0x05;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x6f;
-		pdb->lock_cmp2_mode0 = 0x08;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x04;
-		pdb->ssc_step_size1_mode0 = 0x45;
-		pdb->ssc_step_size2_mode0 = 0x06;
-		pdb->ssc_per1 = 0x36;
-		pdb->cmp_code1_mode0 = 0xE2;
-		pdb->cmp_code2_mode0 = 0x18;
+		pdb->rate_idx = HSCLK_RATE_1620MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x03;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x0f;
-		pdb->lock_cmp2_mode0 = 0x0e;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x13;
-		pdb->ssc_step_size2_mode0 = 0x06;
-		pdb->ssc_per1 = 0x40;
-		pdb->cmp_code1_mode0 = 0xE2;
-		pdb->cmp_code2_mode0 = 0x18;
+		pdb->rate_idx = HSCLK_RATE_2700MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x01;
-		pdb->dec_start_mode0 = 0x8c;
-		pdb->div_frac_start2_mode0 = 0x00;
-		pdb->div_frac_start3_mode0 = 0x0a;
-		pdb->lock_cmp1_mode0 = 0x1f;
-		pdb->lock_cmp2_mode0 = 0x1c;
-		pdb->phy_vco_div = 0x2;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x1a;
-		pdb->ssc_step_size2_mode0 = 0x08;
-		pdb->ssc_per1 = 0x40;
-		pdb->cmp_code1_mode0 = 0x2E;
-		pdb->cmp_code2_mode0 = 0x21;
+		pdb->rate_idx = HSCLK_RATE_5400MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
-		pdb->hsclk_sel = 0x00;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x2f;
-		pdb->lock_cmp2_mode0 = 0x2a;
-		pdb->phy_vco_div = 0x0;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x13;
-		pdb->ssc_step_size2_mode0 = 0x06;
-		pdb->ssc_per1 = 0x40;
-		pdb->cmp_code1_mode0 = 0xE2;
-		pdb->cmp_code2_mode0 = 0x18;
+		pdb->rate_idx = HSCLK_RATE_8100MHZ;
 		break;
 	default:
 		DP_ERR("unsupported rate %ld\n", rate);
@@ -286,6 +243,7 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 {
 	int rc = 0;
 	struct dp_pll_db *pdb = (struct dp_pll_db *)pll->priv;
+	const struct dp_pll_params *params;
 
 	rc = dp_vco_pll_init_db_4nm(pdb, rate);
 	if (rc < 0) {
@@ -304,6 +262,13 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 		dp_pll_write(dp_phy, DP_PHY_PD_CTL, 0x7d);
 	}
 
+	if (pdb->rate_idx < HSCLK_RATE_MAX) {
+		params = &pdb->pll_params[pdb->rate_idx];
+	} else {
+		DP_ERR("link rate not set\n");
+		return -EINVAL;
+	}
+
 	/* Make sure the PHY register writes are done */
 	wmb();
 
@@ -317,7 +282,7 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	wmb();
 
 	/* PLL Optimization */
-	dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, 0x0f);
+	dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, params->pll_ivco);
 	dp_pll_write(dp_pll, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
 	dp_pll_write(dp_pll, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
 	dp_pll_write(dp_pll, QSERDES_COM_CP_CTRL_MODE0, 0x06);
@@ -325,52 +290,56 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	wmb();
 
 	/* link rate dependent params */
-	dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL_1, pdb->hsclk_sel);
-	dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL_1, params->hsclk_sel);
+	dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, params->dec_start_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+		QSERDES_COM_DIV_FRAC_START1_MODE0, params->div_frac_start1_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+		QSERDES_COM_DIV_FRAC_START2_MODE0, params->div_frac_start2_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
-	dp_pll_write(dp_phy, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+		QSERDES_COM_DIV_FRAC_START3_MODE0, params->div_frac_start3_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, params->lock_cmp1_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, params->lock_cmp2_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, params->lock_cmp_en);
+	dp_pll_write(dp_phy, DP_PHY_VCO_DIV, params->phy_vco_div);
 	/* Make sure the PLL register writes are done */
 	wmb();
 
 	dp_pll_write(dp_pll, QSERDES_COM_CMN_CONFIG_1, 0x12);
-	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
-	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+		params->integloop_gain0_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+		params->integloop_gain1_mode0);
 	dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_MAP, 0x00);
 	/* Make sure the PHY register writes are done */
 	wmb();
 
-	dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, 0x0e);
+	dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, params->bg_timer);
 	dp_pll_write(dp_pll, QSERDES_COM_CORECLK_DIV_MODE0, 0x14);
 	dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
 
 	if (pll->bonding_en)
 		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f);
 	else
-		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1D);
+		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
 
-	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, 0x1f);
-	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0, pdb->cmp_code1_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0, pdb->cmp_code2_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, params->core_clk_en);
+	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0,
+		params->cmp_code1_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0,
+		params->cmp_code2_mode0);
 	/* Make sure the PHY register writes are done */
 	wmb();
 
 	if (pll->ssc_en) {
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_EN_CENTER, 0x01);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_ADJ_PER1, 0x00);
-		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, pdb->ssc_per1);
-		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, 0x01);
+		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, params->ssc_per1);
+		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, params->ssc_per2);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE1_MODE0,
-				pdb->ssc_step_size1_mode0);
+				params->ssc_step_size1_mode0);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE2_MODE0,
-				pdb->ssc_step_size2_mode0);
+				params->ssc_step_size2_mode0);
 	}
 
 	if (pdb->orientation == ORIENTATION_CC2)
@@ -393,8 +362,8 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx);
 	dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04);
 	/* Make sure the PLL register writes are done */
 	wmb();
@@ -409,8 +378,8 @@ static int dp_config_vco_rate_4nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx);
 	dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04);
 	/* Make sure the PHY register writes are done */
 	wmb();
@@ -620,13 +589,14 @@ static int dp_pll_configure(struct dp_pll *pll, unsigned long rate)
 	else
 		rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
 
+	pll->vco_rate = rate;
 	rc = dp_vco_set_rate_4nm(pll, rate);
 	if (rc < 0) {
 		DP_ERR("pll rate %lu set failed\n", rate);
+		pll->vco_rate = 0;
 		return rc;
 	}
 
-	pll->vco_rate = rate;
 	DP_DEBUG("pll rate %lu set success\n", rate);
 	return rc;
 }
@@ -645,7 +615,7 @@ static int dp_pll_prepare(struct dp_pll *pll)
 	 * link rate is 8.1Gbps. This will result in voting to place Mx rail in
 	 * turbo as required for V1 hardware PLL functionality.
 	 */
-	if (pll->revision == DP_PLL_4NM_V1 &&
+	if (pll->revision >= DP_PLL_4NM_V1 &&
 	    pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) {
 		rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, true);
 		if (rc < 0) {
@@ -661,7 +631,7 @@ static int dp_pll_prepare(struct dp_pll *pll)
 	return rc;
 }
 
-static int  dp_pll_unprepare(struct dp_pll *pll)
+static int dp_pll_unprepare(struct dp_pll *pll)
 {
 	int rc = 0;
 
@@ -670,7 +640,7 @@ static int  dp_pll_unprepare(struct dp_pll *pll)
 		return -EINVAL;
 	}
 
-	if (pll->revision == DP_PLL_4NM_V1 &&
+	if (pll->revision >= DP_PLL_4NM_V1 &&
 			pll->vco_rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000) {
 		rc = dp_regulator_enable_4nm(pll->parser, DP_PLL_PM, false);
 		if (rc < 0) {
@@ -680,6 +650,7 @@ static int  dp_pll_unprepare(struct dp_pll *pll)
 	}
 
 	dp_pll_disable_4nm(pll);
+	pll->vco_rate = 0;
 
 	return rc;
 }
@@ -893,6 +864,11 @@ int dp_pll_clock_register_4nm(struct dp_pll *pll)
 	pll->priv = &dp_pdb;
 	dp_pdb.pll = pll;
 
+	if (pll->revision == DP_PLL_4NM_V1_1)
+		dp_pdb.pll_params = pll_params_v1_1;
+	else
+		dp_pdb.pll_params = pll_params_v1;
+
 	pll->pll_cfg = dp_pll_configure;
 	pll->pll_prepare = dp_pll_prepare;
 	pll->pll_unprepare = dp_pll_unprepare;

+ 75 - 105
msm/dp/dp_pll_5nm.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -17,7 +18,6 @@
  *
  */
 
-#include <dt-bindings/clock/mdss-5nm-pll-clk.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -37,7 +37,6 @@
 #define DP_PHY_AUX_CFG1				0x0024
 #define DP_PHY_AUX_CFG2				0x0028
 
-#define DP_PHY_VCO_DIV				0x0070
 #define DP_PHY_TX0_TX1_LANE_CTL			0x0078
 #define DP_PHY_TX2_TX3_LANE_CTL			0x009C
 
@@ -129,14 +128,23 @@
 #define DP_5NM_PHY_READY	BIT(1)
 #define DP_5NM_TSYNC_DONE	BIT(0)
 
-static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
+static const struct dp_pll_params pll_params[HSCLK_RATE_MAX] = {
+	{0x05, 0x3f, 0x00, 0x04, 0x01, 0x69, 0x00, 0x80, 0x07, 0x6f, 0x08, 0x45, 0x06, 0x36, 0x01,
+		0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11},
+	{0x03, 0x3f, 0x00, 0x08, 0x01, 0x69, 0x00, 0x80, 0x07, 0x0f, 0x0e, 0x45, 0x06, 0x36, 0x01,
+		0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11},
+	{0x01, 0x3f, 0x00, 0x08, 0x02, 0x8c, 0x00, 0x00, 0x0a, 0x1f, 0x1c, 0x5c, 0x08, 0x36, 0x01,
+		0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11},
+	{0x00, 0x3f, 0x00, 0x08, 0x00, 0x69, 0x00, 0x80, 0x07, 0x2f, 0x2a, 0x45, 0x06, 0x36, 0x01,
+		0x00, 0x00, 0x0f, 0x0a, 0x1f, 0x0a, 0x11},
+};
+
+static int set_vco_div(struct dp_pll *pll, unsigned long rate)
 {
-	u32 val = 0;
+	u32 div, val;
 
-	if (!pll) {
-		DP_ERR("invalid input parameters\n");
+	if (!pll)
 		return -EINVAL;
-	}
 
 	if (is_gdsc_disabled(pll))
 		return -EINVAL;
@@ -144,22 +152,22 @@ static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
 	val = dp_pll_read(dp_phy, DP_PHY_VCO_DIV);
 	val &= ~0x03;
 
-	switch (div) {
-	case 2:
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		div = 2;
 		val |= 1;
 		break;
-	case 4:
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		div = 4;
 		val |= 2;
 		break;
-	case 6:
-	/* When div = 6, val is 0, so do nothing here */
-		;
-		break;
-	case 8:
-		val |= 3;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		div = 6;
+		/* val = 0 for this case, so no update needed */
 		break;
 	default:
-		DP_DEBUG("unsupported div value %d\n", div);
+		/* No other link rates are supported */
 		return -EINVAL;
 	}
 
@@ -167,27 +175,16 @@ static int dp_vco_clk_set_div(struct dp_pll *pll, unsigned int div)
 	/* Make sure the PHY registers writes are done */
 	wmb();
 
-	DP_DEBUG("val=%d div=%x\n", val, div);
-	return 0;
-}
-
-static int set_vco_div(struct dp_pll *pll, unsigned long rate)
-{
-	int div;
-	int rc = 0;
-
-	if (rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
-		div = 6;
-	else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
-		div = 4;
-	else
-		div = 2;
+	/*
+	 * Set the rate for the link and pixel clock sources so that the
+	 * linux clock framework can appropriately compute the MND values
+	 * whenever the pixel clock rate is set.
+	 */
+	clk_set_rate(pll->clk_data->clks[0], pll->vco_rate / 10);
+	clk_set_rate(pll->clk_data->clks[1], pll->vco_rate / div);
 
-	rc = dp_vco_clk_set_div(pll, div);
-	if (rc < 0) {
-		DP_DEBUG("set vco div failed\n");
-		return rc;
-	}
+	DP_DEBUG("val=%#x div=%x link_clk rate=%lu vco_div_clk rate=%lu\n",
+			val, div, pll->vco_rate / 10, pll->vco_rate / div);
 
 	return 0;
 }
@@ -205,62 +202,22 @@ static int dp_vco_pll_init_db_5nm(struct dp_pll_db *pdb,
 	DP_DEBUG("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
 			spare_value, pdb->lane_cnt, pdb->orientation);
 
-	pdb->div_frac_start1_mode0 = 0x00;
-	pdb->integloop_gain0_mode0 = 0x3f;
-	pdb->integloop_gain1_mode0 = 0x00;
-
 	switch (rate) {
 	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
-		pdb->hsclk_sel = 0x05;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x6f;
-		pdb->lock_cmp2_mode0 = 0x08;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x04;
-		pdb->ssc_step_size1_mode0 = 0x45;
-		pdb->ssc_step_size2_mode0 = 0x06;
+		pdb->rate_idx = HSCLK_RATE_1620MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x03;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x0f;
-		pdb->lock_cmp2_mode0 = 0x0e;
-		pdb->phy_vco_div = 0x1;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x45;
-		pdb->ssc_step_size2_mode0 = 0x06;
+		pdb->rate_idx = HSCLK_RATE_2700MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
-		pdb->hsclk_sel = 0x01;
-		pdb->dec_start_mode0 = 0x8c;
-		pdb->div_frac_start2_mode0 = 0x00;
-		pdb->div_frac_start3_mode0 = 0x0a;
-		pdb->lock_cmp1_mode0 = 0x1f;
-		pdb->lock_cmp2_mode0 = 0x1c;
-		pdb->phy_vco_div = 0x2;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x5c;
-		pdb->ssc_step_size2_mode0 = 0x08;
+		pdb->rate_idx = HSCLK_RATE_5400MHZ;
 		break;
 	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
 		DP_DEBUG("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
-		pdb->hsclk_sel = 0x00;
-		pdb->dec_start_mode0 = 0x69;
-		pdb->div_frac_start2_mode0 = 0x80;
-		pdb->div_frac_start3_mode0 = 0x07;
-		pdb->lock_cmp1_mode0 = 0x2f;
-		pdb->lock_cmp2_mode0 = 0x2a;
-		pdb->phy_vco_div = 0x0;
-		pdb->lock_cmp_en = 0x08;
-		pdb->ssc_step_size1_mode0 = 0x45;
-		pdb->ssc_step_size2_mode0 = 0x06;
+		pdb->rate_idx = HSCLK_RATE_8100MHZ;
 		break;
 	default:
 		DP_ERR("unsupported rate %ld\n", rate);
@@ -274,6 +231,7 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 {
 	int rc = 0;
 	struct dp_pll_db *pdb = (struct dp_pll_db *)pll->priv;
+	const struct dp_pll_params *params;
 
 	rc = dp_vco_pll_init_db_5nm(pdb, rate);
 	if (rc < 0) {
@@ -295,6 +253,13 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 	/* Make sure the PHY register writes are done */
 	wmb();
 
+	if (pdb->rate_idx < HSCLK_RATE_MAX) {
+		params = &pdb->pll_params[pdb->rate_idx];
+	} else {
+		DP_ERR("link rate not set\n");
+		return -EINVAL;
+	}
+
 	dp_pll_write(dp_pll, QSERDES_COM_SVS_MODE_CLK_SEL, 0x05);
 	dp_pll_write(dp_pll, QSERDES_COM_SYSCLK_EN_SEL, 0x3b);
 	dp_pll_write(dp_pll, QSERDES_COM_SYS_CLK_CTRL, 0x02);
@@ -305,7 +270,7 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 	wmb();
 
 	/* PLL Optimization */
-	dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, 0x0f);
+	dp_pll_write(dp_pll, QSERDES_COM_PLL_IVCO, params->pll_ivco);
 	dp_pll_write(dp_pll, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
 	dp_pll_write(dp_pll, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
 	dp_pll_write(dp_pll, QSERDES_COM_CP_CTRL_MODE0, 0x06);
@@ -313,48 +278,50 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 	wmb();
 
 	/* link rate dependent params */
-	dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
-	dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_HSCLK_SEL, params->hsclk_sel);
+	dp_pll_write(dp_pll, QSERDES_COM_DEC_START_MODE0, params->dec_start_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+		QSERDES_COM_DIV_FRAC_START1_MODE0, params->div_frac_start1_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+		QSERDES_COM_DIV_FRAC_START2_MODE0, params->div_frac_start2_mode0);
 	dp_pll_write(dp_pll,
-		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
-	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
-	dp_pll_write(dp_phy, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+		QSERDES_COM_DIV_FRAC_START3_MODE0, params->div_frac_start3_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP1_MODE0, params->lock_cmp1_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP2_MODE0, params->lock_cmp2_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_LOCK_CMP_EN, params->lock_cmp_en);
+	dp_pll_write(dp_phy, DP_PHY_VCO_DIV, params->phy_vco_div);
 	/* Make sure the PLL register writes are done */
 	wmb();
 
 	dp_pll_write(dp_pll, QSERDES_COM_CMN_CONFIG, 0x02);
-	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
-	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+		params->integloop_gain0_mode0);
+	dp_pll_write(dp_pll, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+		params->integloop_gain1_mode0);
 	dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_MAP, 0x00);
 	/* Make sure the PHY register writes are done */
 	wmb();
 
-	dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, 0x0a);
+	dp_pll_write(dp_pll, QSERDES_COM_BG_TIMER, params->bg_timer);
 	dp_pll_write(dp_pll, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
 	dp_pll_write(dp_pll, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
 	if (pll->bonding_en)
 		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1f);
 	else
 		dp_pll_write(dp_pll, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
-	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	dp_pll_write(dp_pll, QSERDES_COM_CORE_CLK_EN, params->core_clk_en);
 	/* Make sure the PHY register writes are done */
 	wmb();
 
 	if (pll->ssc_en) {
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_EN_CENTER, 0x01);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_ADJ_PER1, 0x00);
-		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, 0x36);
-		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, 0x01);
+		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER1, params->ssc_per1);
+		dp_pll_write(dp_pll, QSERDES_COM_SSC_PER2, params->ssc_per2);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE1_MODE0,
-				pdb->ssc_step_size1_mode0);
+				params->ssc_step_size1_mode0);
 		dp_pll_write(dp_pll, QSERDES_COM_SSC_STEP_SIZE2_MODE0,
-				pdb->ssc_step_size2_mode0);
+				params->ssc_step_size2_mode0);
 	}
 
 	if (pdb->orientation == ORIENTATION_CC2)
@@ -377,8 +344,8 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx0, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx0, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx0, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx);
+	dp_pll_write(dp_ln_tx0, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx);
 	dp_pll_write(dp_ln_tx0, TXn_TX_BAND, 0x04);
 	/* Make sure the PLL register writes are done */
 	wmb();
@@ -393,8 +360,8 @@ static int dp_config_vco_rate_5nm(struct dp_pll *pll,
 	dp_pll_write(dp_ln_tx1, DP_TRAN_DRVR_EMP_EN, 0xf);
 	dp_pll_write(dp_ln_tx1, TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
 	dp_pll_write(dp_ln_tx1, DP_TX_INTERFACE_MODE, 0x00);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, 0x0A);
-	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_TX, params->lane_offset_tx);
+	dp_pll_write(dp_ln_tx1, TXn_RES_CODE_LANE_OFFSET_RX, params->lane_offset_rx);
 	dp_pll_write(dp_ln_tx1, TXn_TX_BAND, 0x04);
 	/* Make sure the PHY register writes are done */
 	wmb();
@@ -604,13 +571,14 @@ static int dp_pll_configure(struct dp_pll *pll, unsigned long rate)
 	else
 		rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
 
+	pll->vco_rate = rate;
 	rc = dp_vco_set_rate_5nm(pll, rate);
 	if (rc < 0) {
 		DP_ERR("pll rate %lu set failed\n", rate);
+		pll->vco_rate = 0;
 		return rc;
 	}
 
-	pll->vco_rate = rate;
 	DP_DEBUG("pll rate %lu set success\n", rate);
 	return rc;
 }
@@ -645,7 +613,7 @@ static int dp_pll_prepare(struct dp_pll *pll)
 	return rc;
 }
 
-static int  dp_pll_unprepare(struct dp_pll *pll)
+static int dp_pll_unprepare(struct dp_pll *pll)
 {
 	int rc = 0;
 
@@ -664,6 +632,7 @@ static int  dp_pll_unprepare(struct dp_pll *pll)
 	}
 
 	dp_pll_disable_5nm(pll);
+	pll->vco_rate = 0;
 
 	return rc;
 }
@@ -876,6 +845,7 @@ int dp_pll_clock_register_5nm(struct dp_pll *pll)
 	pll->clk_data->clk_num = DP_PLL_NUM_CLKS;
 	pll->priv = &dp_pdb;
 	dp_pdb.pll = pll;
+	dp_pdb.pll_params = pll_params;
 
 	pll->pll_cfg = dp_pll_configure;
 	pll->pll_prepare = dp_pll_prepare;

+ 30 - 22
msm/dp/dp_power.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -80,6 +81,23 @@ static void dp_power_regulator_deinit(struct dp_power_private *power)
 	}
 }
 
+static void dp_power_phy_gdsc(struct dp_power *dp_power, bool on)
+{
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(dp_power->dp_phy_gdsc))
+		return;
+
+	if (on)
+		rc = regulator_enable(dp_power->dp_phy_gdsc);
+	else
+		rc = regulator_disable(dp_power->dp_phy_gdsc);
+
+	if (rc)
+		DP_ERR("Fail to %s dp_phy_gdsc regulator ret =%d\n",
+				on ? "enable" : "disable", rc);
+}
+
 static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
 {
 	int rc = 0, i = 0, j = 0;
@@ -93,6 +111,8 @@ static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
 		 * on the link configuration.
 		 */
 		if (i == DP_PLL_PM) {
+			/* DP GDSC vote is needed for new chipsets, define gdsc phandle if needed */
+			dp_power_phy_gdsc(&power->dp_power, enable);
 			DP_DEBUG("skipping: '%s' vregs for %s\n",
 					enable ? "enable" : "disable",
 					dp_parser_pm_name(i));
@@ -131,23 +151,6 @@ static int dp_power_pinctrl_set(struct dp_power_private *power, bool active)
 	if (IS_ERR_OR_NULL(parser->pinctrl.pin))
 		return 0;
 
-	if (parser->no_aux_switch && parser->lphw_hpd) {
-		pin_state = active ? parser->pinctrl.state_hpd_ctrl
-				: parser->pinctrl.state_hpd_tlmm;
-		if (!IS_ERR_OR_NULL(pin_state)) {
-			rc = pinctrl_select_state(parser->pinctrl.pin,
-				pin_state);
-			if (rc) {
-				DP_ERR("cannot direct hpd line to %s\n",
-					active ? "ctrl" : "tlmm");
-				return rc;
-			}
-		}
-	}
-
-	if (parser->no_aux_switch)
-		return 0;
-
 	pin_state = active ? parser->pinctrl.state_active
 				: parser->pinctrl.state_suspend;
 	if (!IS_ERR_OR_NULL(pin_state)) {
@@ -556,9 +559,6 @@ static int dp_power_config_gpios(struct dp_power_private *power, bool flip,
 	struct dss_module_power *mp;
 	struct dss_gpio *config;
 
-	if (power->parser->no_aux_switch)
-		return 0;
-
 	mp = &power->parser->mp[DP_CORE_PM];
 	config = mp->gpio_config;
 
@@ -784,9 +784,9 @@ static int dp_power_init(struct dp_power *dp_power, bool flip)
 		goto err_gpio;
 	}
 
-	rc = pm_runtime_get_sync(dp_power->drm_dev->dev);
+	rc = pm_runtime_resume_and_get(dp_power->drm_dev->dev);
 	if (rc < 0) {
-		DP_ERR("Power resource enable failed\n");
+		DP_ERR("failed to enable power resource %d\n", rc);
 		goto err_sde_power;
 	}
 
@@ -841,6 +841,7 @@ struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll)
 	int rc = 0;
 	struct dp_power_private *power;
 	struct dp_power *dp_power;
+	struct device *dev;
 
 	if (!parser || !pll) {
 		DP_ERR("invalid input\n");
@@ -859,6 +860,7 @@ struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll)
 	power->pdev = parser->pdev;
 
 	dp_power = &power->dp_power;
+	dev = &power->pdev->dev;
 
 	dp_power->init = dp_power_init;
 	dp_power->deinit = dp_power_deinit;
@@ -871,6 +873,12 @@ struct dp_power *dp_power_get(struct dp_parser *parser, struct dp_pll *pll)
 	dp_power->power_client_deinit = dp_power_client_deinit;
 	dp_power->power_mmrm_init = dp_power_mmrm_init;
 
+	dp_power->dp_phy_gdsc = devm_regulator_get(dev, "dp_phy_gdsc");
+	if (IS_ERR(dp_power->dp_phy_gdsc)) {
+		dp_power->dp_phy_gdsc = NULL;
+		DP_DEBUG("Optional GDSC regulator is missing\n");
+	}
+
 	return dp_power;
 error:
 	return ERR_PTR(rc);

+ 3 - 0
msm/dp/dp_power.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -13,6 +14,7 @@
 /**
  * sruct dp_power - DisplayPort's power related data
  *
+ * @dp_phy_gdsc: GDSC regulator
  * @init: initializes the regulators/core clocks/GPIOs/pinctrl
  * @deinit: turns off the regulators/core clocks/GPIOs/pinctrl
  * @clk_enable: enable/disable the DP clocks
@@ -27,6 +29,7 @@
 struct dp_power {
 	struct drm_device *drm_dev;
 	struct sde_power_handle *phandle;
+	struct regulator *dp_phy_gdsc;
 	int (*init)(struct dp_power *power, bool flip);
 	int (*deinit)(struct dp_power *power);
 	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,

+ 2 - 1
msm/dp/dp_usbpd.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
  */
 
@@ -538,7 +539,7 @@ struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb)
 
 	pd = devm_usbpd_get_by_phandle(dev, pd_phandle);
 	if (IS_ERR(pd)) {
-		DP_ERR("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
+		DP_DEBUG("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
 		rc = PTR_ERR(pd);
 		goto error;
 	}

+ 43 - 1
msm/dsi/dsi_catalog.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/errno.h>
@@ -95,6 +96,8 @@ static void dsi_catalog_cmn_init(struct dsi_ctrl_hw *ctrl,
 			dsi_ctrl_hw_22_reset_trigger_controls;
 		ctrl->ops.log_line_count = dsi_ctrl_hw_22_log_line_count;
 		ctrl->ops.splitlink_cmd_setup = dsi_ctrl_hw_22_configure_splitlink;
+		ctrl->ops.setup_misr = dsi_ctrl_hw_22_setup_misr;
+		ctrl->ops.collect_misr = dsi_ctrl_hw_22_collect_misr;
 		break;
 	default:
 		break;
@@ -189,6 +192,7 @@ static void dsi_catalog_phy_3_0_init(struct dsi_phy_hw *phy)
 	phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = NULL;
 	phy->ops.dyn_refresh_ops.cache_phy_timings =
 		dsi_phy_hw_v3_0_cache_phy_timings;
+	phy->ops.phy_idle_off = NULL;
 }
 
 /**
@@ -229,6 +233,37 @@ static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
 		dsi_phy_hw_v4_0_cache_phy_timings;
 	phy->ops.set_continuous_clk = dsi_phy_hw_v4_0_set_continuous_clk;
 	phy->ops.commit_phy_timing = dsi_phy_hw_v4_0_commit_phy_timing;
+	phy->ops.phy_idle_off = NULL;
+}
+
+/**
+ * dsi_catalog_phy_5_0_init() - catalog init for DSI PHY 7nm
+ */
+static void dsi_catalog_phy_5_0_init(struct dsi_phy_hw *phy)
+{
+	phy->ops.regulator_enable = NULL;
+	phy->ops.regulator_disable = NULL;
+	phy->ops.enable = dsi_phy_hw_v5_0_enable;
+	phy->ops.disable = dsi_phy_hw_v5_0_disable;
+	phy->ops.calculate_timing_params = dsi_phy_hw_calculate_timing_params;
+	phy->ops.ulps_ops.wait_for_lane_idle = dsi_phy_hw_v5_0_wait_for_lane_idle;
+	phy->ops.ulps_ops.ulps_request = dsi_phy_hw_v5_0_ulps_request;
+	phy->ops.ulps_ops.ulps_exit = dsi_phy_hw_v5_0_ulps_exit;
+	phy->ops.ulps_ops.get_lanes_in_ulps = dsi_phy_hw_v5_0_get_lanes_in_ulps;
+	phy->ops.ulps_ops.is_lanes_in_ulps = dsi_phy_hw_v5_0_is_lanes_in_ulps;
+	phy->ops.phy_timing_val = dsi_phy_hw_timing_val_v5_0;
+	phy->ops.phy_lane_reset = dsi_phy_hw_v5_0_lane_reset;
+	phy->ops.toggle_resync_fifo = dsi_phy_hw_v5_0_toggle_resync_fifo;
+	phy->ops.reset_clk_en_sel = dsi_phy_hw_v5_0_reset_clk_en_sel;
+
+	phy->ops.dyn_refresh_ops.dyn_refresh_config = dsi_phy_hw_v5_0_dyn_refresh_config;
+	phy->ops.dyn_refresh_ops.dyn_refresh_pipe_delay = dsi_phy_hw_v5_0_dyn_refresh_pipe_delay;
+	phy->ops.dyn_refresh_ops.dyn_refresh_helper = dsi_phy_hw_v5_0_dyn_refresh_helper;
+	phy->ops.dyn_refresh_ops.dyn_refresh_trigger_sel = dsi_phy_hw_v5_0_dyn_refresh_trigger_sel;
+	phy->ops.dyn_refresh_ops.cache_phy_timings = dsi_phy_hw_v5_0_cache_phy_timings;
+	phy->ops.set_continuous_clk = dsi_phy_hw_v5_0_set_continuous_clk;
+	phy->ops.commit_phy_timing = dsi_phy_hw_v5_0_commit_phy_timing;
+	phy->ops.phy_idle_off = dsi_phy_hw_v5_0_phy_idle_off;
 }
 
 /**
@@ -267,9 +302,12 @@ int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
 	case DSI_PHY_VERSION_4_1:
 	case DSI_PHY_VERSION_4_2:
 	case DSI_PHY_VERSION_4_3:
-	case DSI_PHY_VERSION_5_2:
+	case DSI_PHY_VERSION_4_3_2:
 		dsi_catalog_phy_4_0_init(phy);
 		break;
+	case DSI_PHY_VERSION_5_2:
+		dsi_catalog_phy_5_0_init(phy);
+		break;
 	default:
 		return -ENOTSUPP;
 	}
@@ -291,6 +329,10 @@ int dsi_catalog_phy_pll_setup(struct dsi_phy_hw *phy, u32 pll_ver)
 		phy->ops.configure = dsi_pll_5nm_configure;
 		phy->ops.pll_toggle = dsi_pll_5nm_toggle;
 		break;
+	case DSI_PLL_VERSION_4NM:
+		phy->ops.configure = dsi_pll_4nm_configure;
+		phy->ops.pll_toggle = dsi_pll_4nm_toggle;
+		break;
 	default:
 		phy->ops.configure = NULL;
 		phy->ops.pll_toggle = NULL;

+ 39 - 1
msm/dsi/dsi_catalog.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _DSI_CATALOG_H_
@@ -126,6 +127,24 @@ void dsi_phy_hw_v4_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable);
 void dsi_phy_hw_v4_0_commit_phy_timing(struct dsi_phy_hw *phy,
 		struct dsi_phy_per_lane_cfgs *timing);
 
+/* Definitions for 4nm PHY hardware driver */
+void dsi_phy_hw_v5_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v5_0_disable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+int dsi_phy_hw_v5_0_wait_for_lane_idle(struct dsi_phy_hw *phy, u32 lanes);
+void dsi_phy_hw_v5_0_ulps_request(struct dsi_phy_hw *phy,
+		struct dsi_phy_cfg *cfg, u32 lanes);
+void dsi_phy_hw_v5_0_ulps_exit(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg, u32 lanes);
+u32 dsi_phy_hw_v5_0_get_lanes_in_ulps(struct dsi_phy_hw *phy);
+bool dsi_phy_hw_v5_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes);
+int dsi_phy_hw_timing_val_v5_0(struct dsi_phy_per_lane_cfgs *timing_cfg, u32 *timing_val,
+		u32 size);
+int dsi_phy_hw_v5_0_lane_reset(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v5_0_toggle_resync_fifo(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v5_0_reset_clk_en_sel(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v5_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable);
+void dsi_phy_hw_v5_0_commit_phy_timing(struct dsi_phy_hw *phy,
+		struct dsi_phy_per_lane_cfgs *timing);
+
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
 u32 dsi_ctrl_hw_cmn_poll_dma_status(struct dsi_ctrl_hw *ctrl);
@@ -145,7 +164,9 @@ void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
 			       enum dsi_test_pattern  type,
 			       u32 init_val,
 			       u32 stream_id);
-void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable,
+				enum dsi_ctrl_tpg_pattern pattern,
+				enum dsi_op_mode panel_mode);
 void dsi_ctrl_hw_cmn_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
 				 u32 stream_id);
 
@@ -256,6 +277,10 @@ void dsi_ctrl_hw_22_config_clk_gating(struct dsi_ctrl_hw *ctrl, bool enable,
 void dsi_ctrl_hw_cmn_set_continuous_clk(struct dsi_ctrl_hw *ctrl, bool enable);
 void dsi_ctrl_hw_cmn_hs_req_sel(struct dsi_ctrl_hw *ctrl, bool sel_phy);
 
+void dsi_ctrl_hw_22_setup_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode,
+			bool enable, u32 frame_count);
+u32 dsi_ctrl_hw_22_collect_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode);
+
 /* dynamic refresh specific functions */
 void dsi_phy_hw_v3_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
 void dsi_phy_hw_v3_0_dyn_refresh_config(struct dsi_phy_hw *phy,
@@ -279,6 +304,17 @@ void dsi_phy_hw_v4_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
 int dsi_phy_hw_v4_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
 				      u32 *dst, u32 size);
 
+void dsi_phy_hw_v5_0_dyn_refresh_trigger_sel(struct dsi_phy_hw *phy,
+		bool is_master);
+void dsi_phy_hw_v5_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset);
+void dsi_phy_hw_v5_0_dyn_refresh_config(struct dsi_phy_hw *phy,
+				struct dsi_phy_cfg *cfg, bool is_master);
+void dsi_phy_hw_v5_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy,
+					    struct dsi_dyn_clk_delay *delay);
+
+int dsi_phy_hw_v5_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size);
+void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy);
 void dsi_ctrl_hw_22_configure_cmddma_window(struct dsi_ctrl_hw *ctrl,
 		struct dsi_ctrl_cmd_dma_info *cmd,
 		u32 line_no, u32 window);
@@ -290,6 +326,8 @@ u32 dsi_ctrl_hw_22_log_line_count(struct dsi_ctrl_hw *ctrl, bool cmd_mode);
 int dsi_catalog_phy_pll_setup(struct dsi_phy_hw *phy, u32 pll_ver);
 int dsi_pll_5nm_configure(void *pll, bool commit);
 int dsi_pll_5nm_toggle(void *pll, bool prepare);
+int dsi_pll_4nm_configure(void *pll, bool commit);
+int dsi_pll_4nm_toggle(void *pll, bool prepare);
 
 void dsi_ctrl_hw_22_configure_splitlink(struct dsi_ctrl_hw *ctrl,
 		struct dsi_host_common_cfg *common_cfg, u32 sublink);

+ 53 - 23
msm/dsi/dsi_ctrl.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -84,7 +85,7 @@ static const struct of_device_id msm_dsi_of_match[] = {
 	{}
 };
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 static ssize_t debugfs_state_info_read(struct file *file,
 				       char __user *buff,
 				       size_t count,
@@ -417,6 +418,7 @@ static void dsi_ctrl_clear_dma_status(struct dsi_ctrl *dsi_ctrl)
 static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
+	struct dsi_ctrl_hw_ops dsi_hw_ops = dsi_ctrl->hw.ops;
 	struct dsi_clk_ctrl_info clk_info;
 	u32 mask = BIT(DSI_FIFO_OVERFLOW);
 
@@ -433,6 +435,10 @@ static void dsi_ctrl_post_cmd_transfer(struct dsi_ctrl *dsi_ctrl)
 		dsi_ctrl_dma_cmd_wait_for_done(dsi_ctrl);
 	}
 
+	if (dsi_ctrl->hw.reset_trig_ctrl)
+		dsi_hw_ops.reset_trig_ctrl(&dsi_ctrl->hw,
+				&dsi_ctrl->host_config.common_config);
+
 	/* Command engine disable, unmask overflow, remove vote on clocks and gdsc */
 	rc = dsi_ctrl_set_cmd_engine_state(dsi_ctrl, DSI_CTRL_ENGINE_OFF, false);
 	if (rc)
@@ -1004,6 +1010,9 @@ int dsi_ctrl_pixel_format_to_bpp(enum dsi_pixel_format dst_format)
 	case DSI_PIXEL_FORMAT_RGB888:
 		bpp = 24;
 		break;
+	case DSI_PIXEL_FORMAT_RGB101010:
+		bpp = 30;
+		break;
 	default:
 		bpp = 24;
 		break;
@@ -1117,10 +1126,10 @@ static int dsi_ctrl_enable_supplies(struct dsi_ctrl *dsi_ctrl, bool enable)
 	int rc = 0;
 
 	if (enable) {
-		rc = pm_runtime_get_sync(dsi_ctrl->drm_dev->dev);
+		rc = pm_runtime_resume_and_get(dsi_ctrl->drm_dev->dev);
 		if (rc < 0) {
-			DSI_CTRL_ERR(dsi_ctrl,
-				"Power resource enable failed, rc=%d\n", rc);
+			DSI_CTRL_ERR(dsi_ctrl, "failed to enable power resource %d\n", rc);
+			SDE_EVT32(rc, SDE_EVTLOG_ERROR);
 			goto error;
 		}
 
@@ -1370,10 +1379,6 @@ static void dsi_kickoff_msg_tx(struct dsi_ctrl *dsi_ctrl,
 	SDE_EVT32(dsi_ctrl->cell_index, SDE_EVTLOG_FUNC_ENTRY, flags,
 		msg->flags);
 
-	if (dsi_ctrl->hw.reset_trig_ctrl)
-		dsi_hw_ops.reset_trig_ctrl(&dsi_ctrl->hw,
-				&dsi_ctrl->host_config.common_config);
-
 	if (dsi_hw_ops.splitlink_cmd_setup && split_link->enabled)
 		dsi_hw_ops.splitlink_cmd_setup(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.common_config, flags);
@@ -3012,6 +3017,8 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
 		return -EINVAL;
 	}
 
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
 	if (dsi_ctrl->hw.ops.host_setup)
 		dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.common_config);
@@ -3029,9 +3036,11 @@ int dsi_ctrl_host_timing_update(struct dsi_ctrl *dsi_ctrl)
 				0x0, NULL);
 	} else {
 		DSI_CTRL_ERR(dsi_ctrl, "invalid panel mode for resolution switch\n");
+		mutex_unlock(&dsi_ctrl->ctrl_lock);
 		return -EINVAL;
 	}
 
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
 	return 0;
 }
 
@@ -3392,9 +3401,10 @@ int dsi_ctrl_transfer_prepare(struct dsi_ctrl *dsi_ctrl, u32 flags)
 	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, dsi_ctrl->cell_index, flags);
 
 	/* Vote for clocks, gdsc, enable command engine, mask overflow */
-	rc = pm_runtime_get_sync(dsi_ctrl->drm_dev->dev);
+	rc = pm_runtime_resume_and_get(dsi_ctrl->drm_dev->dev);
 	if (rc < 0) {
-		DSI_CTRL_ERR(dsi_ctrl, "failed gdsc voting\n");
+		DSI_CTRL_ERR(dsi_ctrl, "failed to enable power resource %d\n", rc);
+		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
 		return rc;
 	}
 
@@ -3731,7 +3741,9 @@ error:
  *
  * Return: error code.
  */
-int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on,
+			enum dsi_test_pattern type, u32 init_val,
+			enum dsi_ctrl_tpg_pattern pattern)
 {
 	int rc = 0;
 
@@ -3750,19 +3762,13 @@ int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
 	}
 
 	if (on) {
-		if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
-			dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw,
-							  DSI_TEST_PATTERN_INC,
-							  0xFFFF);
-		} else {
-			dsi_ctrl->hw.ops.cmd_test_pattern_setup(
-							&dsi_ctrl->hw,
-							DSI_TEST_PATTERN_INC,
-							0xFFFF,
-							0x0);
-		}
+		if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE)
+			dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw, type, init_val);
+		else
+			dsi_ctrl->hw.ops.cmd_test_pattern_setup(&dsi_ctrl->hw, type, init_val, 0x0);
 	}
-	dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on);
+	dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on, pattern,
+			dsi_ctrl->host_config.panel_mode);
 
 	DSI_CTRL_DEBUG(dsi_ctrl, "Set test pattern state=%d\n", on);
 	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
@@ -3771,6 +3777,30 @@ error:
 	return rc;
 }
 
+/**
+ * dsi_ctrl_trigger_test_pattern() - trigger a command mode frame update with test pattern
+ * @dsi_ctrl:           DSI controller handle.
+ *
+ * Trigger a command mode frame update with chosen test pattern.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_trigger_test_pattern(struct dsi_ctrl *dsi_ctrl)
+{
+	int ret = 0;
+
+	if (!dsi_ctrl) {
+		DSI_CTRL_ERR(dsi_ctrl, "Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	dsi_ctrl->hw.ops.trigger_cmd_test_pattern(&dsi_ctrl->hw, 0);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return ret;
+}
+
 /**
  * dsi_ctrl_set_host_engine_state() - set host engine state
  * @dsi_ctrl:            DSI Controller handle.

+ 17 - 1
msm/dsi/dsi_ctrl.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -573,13 +574,28 @@ int dsi_ctrl_set_roi(struct dsi_ctrl *dsi_ctrl, struct dsi_rect *roi,
  * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
  * @dsi_ctrl:          DSI controller handle.
  * @on:                enable/disable test pattern.
+ * @type:              type of test pattern to generate.
+ * @init_val:          seed value for generating test pattern.
+ * @pattern:           test pattern to generate.
  *
  * Test pattern can be enabled only after Video engine (for video mode panels)
  * or command engine (for cmd mode panels) is enabled.
  *
  * Return: error code.
  */
-int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on,
+		enum dsi_test_pattern type, u32 init_val,
+		enum dsi_ctrl_tpg_pattern pattern);
+
+/**
+ * dsi_ctrl_trigger_test_pattern() - trigger a command mode frame update with test pattern
+ * @dsi_ctrl:          DSI controller handle.
+ *
+ * Trigger a command mode frame update with chosen test pattern.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_trigger_test_pattern(struct dsi_ctrl *dsi_ctrl);
 
 /**
  * dsi_ctrl_transfer_prepare() - Set up a command transfer

+ 31 - 1
msm/dsi/dsi_ctrl_hw.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -41,6 +42,29 @@
  */
 #define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER            0x1
 
+/**
+ * enum dsi_ctrl_tpg_pattern - type of TPG pattern
+ * @DSI_CTRL_TPG_COUNTER:
+ * @DSI_CTRL_TPG_FIXED:
+ * @DSI_CTRL_TPG_COLOR_RAMP_64L_64P:
+ * @DSI_CTRL_TPG_COLOR_RAMP_64L_256P:
+ * @DSI_CTRL_TPG_BLACK_WHITE_VERTICAL_LINES:
+ * @DSI_CTRL_TPG_GRAYSCALE_RAMP:
+ * @DSI_CTRL_TPG_COLOR_SQUARE:
+ * @DSI_CTRL_TPG_CHECKERED_RECTANGLE:
+ * @DSI_CTRL_TPG_BASIC_COLOR_CHANGING:
+ */
+enum dsi_ctrl_tpg_pattern {
+	DSI_CTRL_TPG_COUNTER = 0,
+	DSI_CTRL_TPG_FIXED,
+	DSI_CTRL_TPG_COLOR_RAMP_64L_64P,
+	DSI_CTRL_TPG_COLOR_RAMP_64L_256P,
+	DSI_CTRL_TPG_BLACK_WHITE_VERTICAL_LINES,
+	DSI_CTRL_TPG_GRAYSCALE_RAMP,
+	DSI_CTRL_TPG_COLOR_SQUARE,
+	DSI_CTRL_TPG_CHECKERED_RECTANGLE,
+	DSI_CTRL_TPG_BASIC_COLOR_CHANGING
+};
+
 /**
  * enum dsi_ctrl_version - version of the dsi host controller
  * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
@@ -92,12 +116,14 @@ enum dsi_ctrl_hw_features {
  * @DSI_TEST_PATTERN_FIXED:     Test pattern is fixed, based on init value.
  * @DSI_TEST_PATTERN_INC:       Incremental test pattern, base on init value.
  * @DSI_TEST_PATTERN_POLY:      Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_GENERAL:   MDSS general test pattern.
  * @DSI_TEST_PATTERN_MAX:
  */
 enum dsi_test_pattern {
 	DSI_TEST_PATTERN_FIXED = 0,
 	DSI_TEST_PATTERN_INC,
 	DSI_TEST_PATTERN_POLY,
+	DSI_TEST_PATTERN_GENERAL,
 	DSI_TEST_PATTERN_MAX
 };
 
@@ -727,8 +753,12 @@ struct dsi_ctrl_hw_ops {
 	 * test_pattern_enable() - enable test pattern engine
 	 * @ctrl:          Pointer to the controller host hardware.
 	 * @enable:        Enable/Disable test pattern engine.
+	 * @pattern:       Type of TPG pattern
+	 * @panel_mode:    DSI operation mode
 	 */
-	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable,
+					   enum dsi_ctrl_tpg_pattern pattern,
+					   enum dsi_op_mode panel_mode);
 
 	/**
 	 * clear_phy0_ln_err() - clear DSI PHY lane-0 errors

+ 36 - 0
msm/dsi/dsi_ctrl_hw_2_2.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/iopoll.h>
 #include "dsi_ctrl_hw.h"
@@ -20,6 +21,9 @@
 #define MDP_INTF_TEAR_LINE_COUNT_OFFSET 0x30
 #define MDP_INTF_LINE_COUNT_OFFSET 0xB0
 
+#define DSI_MDP_MISR_CTRL 0x364
+#define DSI_MDP_MISR_SIGNATURE 0x368
+
 void dsi_ctrl_hw_22_setup_lane_map(struct dsi_ctrl_hw *ctrl,
 		       struct dsi_lane_map *lane_map)
 {
@@ -312,3 +316,35 @@ void dsi_ctrl_hw_22_configure_splitlink(struct dsi_ctrl_hw *ctrl,
 	/* Make sure the split link config is updated */
 	wmb();
 }
+
+void dsi_ctrl_hw_22_setup_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode,
+			bool enable, u32 frame_count)
+{
+	u32 config = 0;
+
+	DSI_W32(ctrl, DSI_MDP_MISR_CTRL, config);
+	wmb(); /* clear misr data */
+
+	if (enable) {
+		config = (frame_count & 0xffff);
+		config |= BIT(8) | BIT(24) | BIT(31); /* enable, panel data-only, free run mode */
+	}
+
+	DSI_CTRL_HW_DBG(ctrl, "MISR enable:%d, frame_count:%d, config:0x%x\n",
+			enable, frame_count, config);
+	DSI_W32(ctrl, DSI_MDP_MISR_CTRL, config);
+	wmb(); /* make sure MISR is configured */
+}
+
+u32 dsi_ctrl_hw_22_collect_misr(struct dsi_ctrl_hw *ctrl, enum dsi_op_mode panel_mode)
+{
+	u32 enabled;
+	u32 misr = 0;
+
+	enabled = DSI_R32(ctrl, DSI_MDP_MISR_CTRL) & BIT(8);
+	if (enabled)
+		misr = DSI_R32(ctrl, DSI_MDP_MISR_SIGNATURE);
+
+	DSI_CTRL_HW_DBG(ctrl, "MISR enabled:%d value:0x%x\n", enabled, misr);
+	return misr;
+}

+ 40 - 10
msm/dsi/dsi_ctrl_hw_cmn.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -41,9 +42,9 @@ static bool dsi_compression_enabled(struct dsi_mode_info *mode)
 
 /* Unsupported formats default to RGB888 */
 static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4, 0x9 };
 static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
-	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3, 0x4 };
 
 /**
  * dsi_split_link_setup() - setup dsi split link configurations
@@ -543,10 +544,13 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 	u32 reg = 0, offset = 0;
 	int pic_width = 0, this_frame_slices = 0, intf_ip_w = 0;
 	u32 pkt_per_line = 0, eol_byte_num = 0, bytes_in_slice = 0;
+	u32 bpp;
 
 	if (roi && (!roi->w || !roi->h))
 		return;
 
+	bpp = dsi_pixel_format_to_bpp(cfg->dst_format);
+
 	if (dsi_dsc_compression_enabled(mode)) {
 		struct msm_display_dsc_info dsc;
 
@@ -580,11 +584,11 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 		bytes_in_slice = vdc.bytes_in_slice;
 	} else if (roi) {
 		width_final = roi->w;
-		stride_final = roi->w * 3;
+		stride_final = DIV_ROUND_UP(roi->w * bpp, 8);
 		height_final = roi->h;
 	} else {
 		width_final = mode->h_active;
-		stride_final = mode->h_active * 3;
+		stride_final = DIV_ROUND_UP(mode->h_active * bpp, 8);
 		height_final = mode->v_active;
 	}
 
@@ -629,7 +633,7 @@ void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 	}
 
 	/* HS Timer value */
-	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x49C3C);
 
 	stream_ctrl = (stride_final + 1) << 16;
 	stream_ctrl |= (vc_id & 0x3) << 8;
@@ -701,7 +705,7 @@ void dsi_ctrl_hw_cmn_video_engine_setup(struct dsi_ctrl_hw *ctrl,
 	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
 	reg |= (cfg->traffic_mode & 0x3) << 8;
 	reg |= (cfg->vc_id & 0x3);
-	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x7) << 4;
 	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
 
 	reg = (common_cfg->swap_mode & 0x7) << 12;
@@ -1436,6 +1440,9 @@ void dsi_ctrl_hw_cmn_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
 	else
 		int_ctrl &= ~BIT(25);
 
+	/* Do not clear interrupt status */
+	int_ctrl &= 0xAAEEAAFE;
+
 	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
 		int_mask0 &= ~BIT(0);
 	if (errors & DSI_RDBK_MULTI_ECC_ERR)
@@ -1509,20 +1516,23 @@ void dsi_ctrl_hw_cmn_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
 					     enum dsi_test_pattern type,
 					     u32 init_val)
 {
-	u32 reg = 0;
+	u32 reg = 0, pattern_sel_shift = 4;
 
 	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
 
 	switch (type) {
 	case DSI_TEST_PATTERN_FIXED:
-		reg |= (0x2 << 4);
+		reg |= (0x2 << pattern_sel_shift);
 		break;
 	case DSI_TEST_PATTERN_INC:
-		reg |= (0x1 << 4);
+		reg |= (0x1 << pattern_sel_shift);
 		break;
 	case DSI_TEST_PATTERN_POLY:
 		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
 		break;
+	case DSI_TEST_PATTERN_GENERAL:
+		reg |= (0x3 << pattern_sel_shift);
+		break;
 	default:
 		break;
 	}
@@ -1583,6 +1593,9 @@ void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
 	case DSI_TEST_PATTERN_POLY:
 		DSI_W32(ctrl, poly_offset, 0xF0F0F);
 		break;
+	case DSI_TEST_PATTERN_GENERAL:
+		reg |= (0x3 << pattern_sel_shift);
+		break;
 	default:
 		break;
 	}
@@ -1595,11 +1608,28 @@ void dsi_ctrl_hw_cmn_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
  * test_pattern_enable() - enable test pattern engine
  * @ctrl:          Pointer to the controller host hardware.
  * @enable:        Enable/Disable test pattern engine.
+ * @pattern:       Type of TPG pattern
+ * @panel_mode:    DSI operation mode
  */
 void dsi_ctrl_hw_cmn_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
-					bool enable)
+					bool enable, enum dsi_ctrl_tpg_pattern pattern,
+					enum dsi_op_mode panel_mode)
 {
 	u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+	u32 reg_tpg_main_control = 0;
+	u32 reg_tpg_video_config = BIT(0);
+
+	reg_tpg_video_config |= BIT(2);
+
+	if (panel_mode == DSI_OP_CMD_MODE) {
+		reg_tpg_main_control = BIT(pattern);
+		DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL2, reg_tpg_main_control);
+	} else {
+		reg_tpg_main_control = BIT(pattern + 1);
+		DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, reg_tpg_main_control);
+	}
+
+	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, reg_tpg_video_config);
 
 	if (enable)
 		reg |= BIT(0);

+ 17 - 0
msm/dsi/dsi_defs.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -39,6 +40,7 @@
  * @DSI_PIXEL_FORMAT_RGB111:
  * @DSI_PIXEL_FORMAT_RGB332:
  * @DSI_PIXEL_FORMAT_RGB444:
+ * @DSI_PIXEL_FORMAT_RGB101010:
  * @DSI_PIXEL_FORMAT_MAX:
  */
 enum dsi_pixel_format {
@@ -49,6 +51,7 @@ enum dsi_pixel_format {
 	DSI_PIXEL_FORMAT_RGB111,
 	DSI_PIXEL_FORMAT_RGB332,
 	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_RGB101010,
 	DSI_PIXEL_FORMAT_MAX
 };
 
@@ -496,6 +499,8 @@ struct dsi_split_link_config {
  *			 cmd it points to the line after TE.
  * @dma_sched_window:	 Determines the width of the window during the
  *			 DSI command will be sent by the HW.
+ * @vpadding:			 panel stacking height.
+ * @line_insertion_enable: line insertion support enable.
  */
 struct dsi_host_common_cfg {
 	enum dsi_pixel_format dst_format;
@@ -523,6 +528,8 @@ struct dsi_host_common_cfg {
 	u32 byte_intf_clk_div;
 	u32 dma_sched_line;
 	u32 dma_sched_window;
+	u32 vpadding;
+	bool line_insertion_enable;
 };
 
 /**
@@ -610,6 +617,10 @@ struct dsi_host_config {
  * @panel_prefill_lines:  Panel prefill lines for RSC
  * @mdp_transfer_time_us:   Specifies the mdp transfer time for command mode
  *                          panels in microseconds.
+ * @mdp_transfer_time_us_min:   Specifies the minimum possible mdp transfer time
+ *                              for command mode panels in microseconds.
+ * @mdp_transfer_time_us_max:   Specifies the maximum possible mdp transfer time
+ *                              for command mode panels in microseconds.
  * @dsi_transfer_time_us: Specifies the dsi transfer time for cmd panels.
  * @qsync_min_fps:        Qsync min fps value for the mode
  * @clk_rate_hz:          DSI bit clock per lane in hz.
@@ -618,6 +629,7 @@ struct dsi_host_config {
  * @topology:             Topology selected for the panel
  * @dsc:                  DSC compression info
  * @vdc:                  VDC compression info
+ * @wd_jitter:            WD Jitter config.
  * @dsc_enabled:          DSC compression enabled
  * @vdc_enabled:          VDC compression enabled
  * @pclk_scale:           pclk scale factor, target bpp to source bpp
@@ -636,6 +648,8 @@ struct dsi_display_mode_priv_info {
 	u32 panel_jitter_denom;
 	u32 panel_prefill_lines;
 	u32 mdp_transfer_time_us;
+	u32 mdp_transfer_time_us_min;
+	u32 mdp_transfer_time_us_max;
 	u32 dsi_transfer_time_us;
 	u32 qsync_min_fps;
 	u64 clk_rate_hz;
@@ -645,6 +659,7 @@ struct dsi_display_mode_priv_info {
 	struct msm_display_topology topology;
 	struct msm_display_dsc_info dsc;
 	struct msm_display_vdc_info vdc;
+	struct msm_display_wd_jitter_config wd_jitter;
 	bool dsc_enabled;
 	bool vdc_enabled;
 	struct msm_ratio pclk_scale;
@@ -767,6 +782,8 @@ static inline int dsi_pixel_format_to_bpp(enum dsi_pixel_format fmt)
 		return 8;
 	case DSI_PIXEL_FORMAT_RGB444:
 		return 12;
+	case DSI_PIXEL_FORMAT_RGB101010:
+		return 30;
 	}
 	return 24;
 }

+ 205 - 76
msm/dsi/dsi_display.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -65,7 +66,8 @@ static bool is_sim_panel(struct dsi_display *display)
 	if (!display || !display->panel)
 		return false;
 
-	return display->panel->te_using_watchdog_timer;
+	return (display->panel->te_using_watchdog_timer ||
+			display->panel->panel_ack_disabled);
 }
 
 static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display,
@@ -705,14 +707,18 @@ static void dsi_display_set_cmd_tx_ctrl_flags(struct dsi_display *display,
 		/*
 		 * Set flags for command scheduling.
 		 * 1) In video mode command DMA scheduling is default.
-		 * 2) In command mode command DMA scheduling depends on message
+		 * 2) In command mode unicast command DMA scheduling depends on message
 		 * flag and TE needs to be running.
+		 * 3) In command mode broadcast command DMA scheduling is default and
+		 * TE needs to be running.
 		 */
 		if (display->panel->panel_mode == DSI_OP_VIDEO_MODE) {
 			flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 		} else {
 			if (msg->flags & MIPI_DSI_MSG_CMD_DMA_SCHED)
 				flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
+			if (flags & DSI_CTRL_CMD_BROADCAST)
+				flags |= DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 			if (!display->enabled)
 				flags &= ~DSI_CTRL_CMD_CUSTOM_DMA_SCHED;
 		}
@@ -1362,7 +1368,7 @@ int dsi_display_set_power(struct drm_connector *connector,
 	return rc;
 }
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 static bool dsi_display_is_te_based_esd(struct dsi_display *display)
 {
 	u32 status_mode = 0;
@@ -2743,21 +2749,48 @@ static int dsi_display_set_clk_src(struct dsi_display *display, bool set_xo)
 	return 0;
 }
 
-int dsi_display_phy_pll_toggle(void *priv, bool prepare)
+static int dsi_display_phy_pll_enable(struct dsi_display *display)
 {
 	int rc = 0;
-	struct dsi_display *display = priv;
 	struct dsi_display_ctrl *m_ctrl;
 
-	if (!display) {
-		DSI_ERR("invalid arguments\n");
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	if (!m_ctrl->phy) {
+		DSI_ERR("[%s] PHY not found\n", display->name);
 		return -EINVAL;
 	}
 
-	if (is_skip_op_required(display))
-		return 0;
+	/*
+	 * It is recommended to turn on the PLL before switching parent
+	 * of RCG to PLL because when RCG is on, both the old and new
+	 * sources should be on while switching the RCG parent.
+	 *
+	 * Note: Branch clocks and in turn RCG might not get turned off
+	 * during clock disable sequence if there is a vote from dispcc
+	 * or any of its other consumers.
+	 */
+
+	rc = dsi_phy_pll_toggle(m_ctrl->phy, true);
+	if (rc)
+		return rc;
+
+	return dsi_display_set_clk_src(display, false);
+}
+
+static int dsi_display_phy_pll_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	struct dsi_display_ctrl *m_ctrl;
+
+	/*
+	 * It is recommended to turn off the PLL after switching parent
+	 * of RCG to PLL because when RCG is on, both the old and new
+	 * sources should be on while switching the RCG parent.
+	 */
 
-	rc = dsi_display_set_clk_src(display, !prepare);
+	rc = dsi_display_set_clk_src(display, true);
+	if (rc)
+		return rc;
 
 	m_ctrl = &display->ctrl[display->clk_master_idx];
 	if (!m_ctrl->phy) {
@@ -2765,9 +2798,25 @@ int dsi_display_phy_pll_toggle(void *priv, bool prepare)
 		return -EINVAL;
 	}
 
-	rc = dsi_phy_pll_toggle(m_ctrl->phy, prepare);
+	return dsi_phy_pll_toggle(m_ctrl->phy, false);
+}
 
-	return rc;
+int dsi_display_phy_pll_toggle(void *priv, bool prepare)
+{
+	struct dsi_display *display = priv;
+
+	if (!display) {
+		DSI_ERR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (is_skip_op_required(display))
+		return 0;
+
+	if (prepare)
+		return dsi_display_phy_pll_enable(display);
+	else
+		return dsi_display_phy_pll_disable(display);
 }
 
 int dsi_display_phy_configure(void *priv, bool commit)
@@ -5283,10 +5332,10 @@ int dsi_display_cont_splash_config(void *dsi_display)
 		return -EINVAL;
 	}
 
-	rc = pm_runtime_get_sync(display->drm_dev->dev);
+	rc = pm_runtime_resume_and_get(display->drm_dev->dev);
 	if (rc < 0) {
-		DSI_ERR("failed to vote gdsc for continuous splash, rc=%d\n",
-							rc);
+		DSI_ERR("failed to enable power resource %d\n", rc);
+		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
 		return rc;
 	}
 
@@ -5362,7 +5411,22 @@ int dsi_display_splash_res_cleanup(struct  dsi_display *display)
 
 static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
 {
-	int rc = 0;
+	int rc = 0, i = 0;
+	struct dsi_display_ctrl *ctrl;
+
+	/*
+	 * The force update dsi clock, is the only clock update function that toggles the state of
+	 * DSI clocks without any ref count protection. With the addition of ASYNC command wait,
+	 * there is a need for adding a check for any queued waits before updating these clocks.
+	 */
+	display_for_each_ctrl(i, display) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || !(ctrl->ctrl->post_tx_queued))
+			continue;
+		flush_workqueue(display->post_cmd_tx_workq);
+		cancel_work_sync(&ctrl->ctrl->post_cmd_tx_work);
+		ctrl->ctrl->post_tx_queued = false;
+	}
 
 	rc = dsi_display_link_clk_force_update_ctrl(display->dsi_clk_handle);
 
@@ -5938,7 +6002,7 @@ int dsi_display_dev_probe(struct platform_device *pdev)
 		panel_node = of_parse_phandle(node,
 				"qcom,dsi-default-panel", 0);
 		if (!panel_node)
-			DSI_WARN("%s default panel not found\n", display->display_type);
+			DSI_INFO("%s default panel not found\n", display->display_type);
 	}
 
 	boot_disp->node = pdev->dev.of_node;
@@ -6536,7 +6600,8 @@ int dsi_display_drm_ext_bridge_init(struct dsi_display *display,
 			ext_bridge->funcs = &ext_bridge_info->bridge_funcs;
 		}
 
-		rc = drm_bridge_attach(encoder, ext_bridge, prev_bridge, 0);
+		rc = drm_bridge_attach(encoder, ext_bridge, prev_bridge,
+					DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 		if (rc) {
 			DSI_ERR("[%s] ext brige attach failed, %d\n",
 				display->name, rc);
@@ -6950,58 +7015,15 @@ void dsi_display_put_mode(struct dsi_display *display,
 	dsi_panel_put_mode(mode);
 }
 
-int dsi_display_get_modes(struct dsi_display *display,
-			  struct dsi_display_mode **out_modes)
+int dsi_display_get_modes_helper(struct dsi_display *display,
+	struct dsi_display_ctrl *ctrl, u32 timing_mode_count,
+	struct dsi_dfps_capabilities dfps_caps, struct dsi_qsync_capabilities *qsync_caps,
+	struct dsi_dyn_clk_caps *dyn_clk_caps)
 {
-	struct dsi_dfps_capabilities dfps_caps;
-	struct dsi_display_ctrl *ctrl;
-	struct dsi_host_common_cfg *host = &display->panel->host_config;
+	int dsc_modes = 0, nondsc_modes = 0, rc = 0, i, start, end;
+	u32 num_dfps_rates, mode_idx, sublinks_count, array_idx = 0;
 	bool is_split_link, support_cmd_mode, support_video_mode;
-	u32 num_dfps_rates, timing_mode_count, display_mode_count;
-	u32 sublinks_count, mode_idx, array_idx = 0;
-	struct dsi_dyn_clk_caps *dyn_clk_caps;
-	int i, start, end, rc = -EINVAL;
-	int dsc_modes = 0, nondsc_modes = 0;
-	struct dsi_qsync_capabilities *qsync_caps;
-
-	if (!display || !out_modes) {
-		DSI_ERR("Invalid params\n");
-		return -EINVAL;
-	}
-
-	*out_modes = NULL;
-	ctrl = &display->ctrl[0];
-
-	mutex_lock(&display->display_lock);
-
-	if (display->modes)
-		goto exit;
-
-	display_mode_count = display->panel->num_display_modes;
-
-	display->modes = kcalloc(display_mode_count, sizeof(*display->modes),
-			GFP_KERNEL);
-	if (!display->modes) {
-		rc = -ENOMEM;
-		goto error;
-	}
-
-	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
-	if (rc) {
-		DSI_ERR("[%s] failed to get dfps caps from panel\n",
-				display->name);
-		goto error;
-	}
-
-	qsync_caps = &(display->panel->qsync_caps);
-	dyn_clk_caps = &(display->panel->dyn_clk_caps);
-
-	timing_mode_count = display->panel->num_timing_nodes;
-
-	/* Validate command line timing */
-	if ((display->cmdline_timing != NO_OVERRIDE) &&
-		(display->cmdline_timing >= timing_mode_count))
-		display->cmdline_timing = NO_OVERRIDE;
+	struct dsi_host_common_cfg *host = &display->panel->host_config;
 
 	for (mode_idx = 0; mode_idx < timing_mode_count; mode_idx++) {
 		struct dsi_display_mode display_mode;
@@ -7018,7 +7040,8 @@ int dsi_display_get_modes(struct dsi_display *display,
 		if (rc) {
 			DSI_ERR("[%s] failed to get mode idx %d from panel\n",
 				   display->name, mode_idx);
-			goto error;
+			rc = -EINVAL;
+			return rc;
 		}
 
 		if (display->cmdline_timing == display_mode.mode_idx) {
@@ -7084,7 +7107,7 @@ int dsi_display_get_modes(struct dsi_display *display,
 			if (!sub_mode) {
 				DSI_ERR("invalid mode data\n");
 				rc = -EFAULT;
-				goto error;
+				return rc;
 			}
 
 			memcpy(sub_mode, &display_mode, sizeof(display_mode));
@@ -7110,14 +7133,14 @@ int dsi_display_get_modes(struct dsi_display *display,
 					sizeof(*sub_mode->priv_info), GFP_KERNEL);
 			if (!sub_mode->priv_info) {
 				rc = -ENOMEM;
-				goto error;
+				return rc;
 			}
 
 			rc = dsi_display_mode_dyn_clk_cpy(display,
 					&display_mode, sub_mode);
 			if (rc) {
 				DSI_ERR("unable to copy dyn clock list\n");
-				goto error;
+				return rc;
 			}
 
 			sub_mode->mode_idx += (array_idx - 1);
@@ -7158,6 +7181,63 @@ int dsi_display_get_modes(struct dsi_display *display,
 	if (dsc_modes && nondsc_modes)
 		display->panel->dsc_switch_supported = true;
 
+	return rc;
+}
+
+int dsi_display_get_modes(struct dsi_display *display,
+			  struct dsi_display_mode **out_modes)
+{
+	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_display_ctrl *ctrl;
+	u32 timing_mode_count, display_mode_count;
+	struct dsi_dyn_clk_caps *dyn_clk_caps;
+	int rc = -EINVAL;
+	struct dsi_qsync_capabilities *qsync_caps;
+
+	if (!display || !out_modes) {
+		DSI_ERR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	*out_modes = NULL;
+	ctrl = &display->ctrl[0];
+
+	mutex_lock(&display->display_lock);
+
+	if (display->modes)
+		goto exit;
+
+	display_mode_count = display->panel->num_display_modes;
+
+	display->modes = kcalloc(display_mode_count, sizeof(*display->modes),
+			GFP_KERNEL);
+	if (!display->modes) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+	if (rc) {
+		DSI_ERR("[%s] failed to get dfps caps from panel\n",
+				display->name);
+		goto error;
+	}
+
+	qsync_caps = &(display->panel->qsync_caps);
+	dyn_clk_caps = &(display->panel->dyn_clk_caps);
+
+	timing_mode_count = display->panel->num_timing_nodes;
+
+	/* Validate command line timing */
+	if ((display->cmdline_timing != NO_OVERRIDE) &&
+		(display->cmdline_timing >= timing_mode_count))
+		display->cmdline_timing = NO_OVERRIDE;
+
+	rc = dsi_display_get_modes_helper(display, ctrl, timing_mode_count,
+			dfps_caps, qsync_caps, dyn_clk_caps);
+	if (rc)
+		goto error;
+
 exit:
 	*out_modes = display->modes;
 	rc = 0;
@@ -7276,6 +7356,46 @@ int dsi_display_get_avr_step_req_fps(void *display_dsi, u32 mode_fps)
 	return step;
 }
 
+int dsi_display_update_transfer_time(void *display, u32 transfer_time)
+{
+	struct dsi_display *disp = (struct dsi_display *)display;
+	int rc = 0, i = 0;
+	u32 transfer_time_min, transfer_time_max;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!disp->panel || !disp->panel->cur_mode || !disp->panel->cur_mode->priv_info)
+		return -EINVAL;
+
+	transfer_time_min = disp->panel->cur_mode->priv_info->mdp_transfer_time_us_min;
+	transfer_time_max = disp->panel->cur_mode->priv_info->mdp_transfer_time_us_max;
+
+	if (!transfer_time_min || !transfer_time_max)
+		return 0;
+
+	if (transfer_time < transfer_time_min || transfer_time > transfer_time_max) {
+		DSI_ERR("invalid transfer time %u, min: %u, max: %u\n",
+			transfer_time, transfer_time_min, transfer_time_max);
+		return -EINVAL;
+	}
+
+	disp->panel->cur_mode->priv_info->mdp_transfer_time_us = transfer_time;
+	disp->panel->cur_mode->priv_info->dsi_transfer_time_us = transfer_time;
+
+	display_for_each_ctrl(i, disp) {
+		ctrl = &disp->ctrl[i];
+		rc = dsi_ctrl_update_host_config(ctrl->ctrl, &disp->config,
+				disp->panel->cur_mode, 0x0,
+				disp->dsi_clk_handle);
+		if (rc) {
+			DSI_ERR("[%s] failed to update ctrl config, rc=%d\n", disp->name, rc);
+			return rc;
+		}
+	}
+	atomic_set(&disp->clkrate_change_pending, 1);
+
+	return 0;
+}
+
 static bool dsi_display_match_timings(const struct dsi_display_mode *mode1,
 		struct dsi_display_mode *mode2, unsigned int match_flags)
 {
@@ -7643,7 +7763,10 @@ error:
 	return rc;
 }
 
-int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable,
+			enum dsi_test_pattern type,
+			u32 init_val,
+			enum dsi_ctrl_tpg_pattern pattern)
 {
 	int rc = 0;
 	int i;
@@ -7656,12 +7779,18 @@ int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
 
 	display_for_each_ctrl(i, display) {
 		ctrl = &display->ctrl[i];
-		rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable);
+		rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable, type, init_val, pattern);
 		if (rc) {
-			DSI_ERR("[%s] failed to set tpg state for host_%d\n",
-			       display->name, i);
+			DSI_ERR("[%s] failed to set tpg state for host_%d\n", display->name, i);
 			goto error;
 		}
+		if (enable && ctrl->ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+			rc = dsi_ctrl_trigger_test_pattern(ctrl->ctrl);
+			if (rc) {
+				DSI_ERR("[%s] failed to start tpg for host_%d\n", display->name, i);
+				goto error;
+			}
+		}
 	}
 
 	display->is_tpg_enabled = enable;

+ 14 - 1
msm/dsi/dsi_display.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -633,7 +634,10 @@ int dsi_pre_clkon_cb(void *priv, enum dsi_clk_type clk_type,
  */
 int dsi_display_unprepare(struct dsi_display *display);
 
-int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable,
+		enum dsi_test_pattern type,
+		u32 init_val,
+		enum dsi_ctrl_tpg_pattern pattern);
 
 int dsi_display_clock_gate(struct dsi_display *display, bool enable);
 int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
@@ -831,4 +835,13 @@ int dsi_display_restore_bit_clk(struct dsi_display *display, struct dsi_display_
 bool dsi_display_mode_match(const struct dsi_display_mode *mode1,
 		struct dsi_display_mode *mode2, unsigned int match_flags);
 
+/**
+ * dsi_display_update_transfer_time() - update DSI transfer time and clocks
+ * @display:     handle to display
+ * @transfer_time: transfer time value to be updated
+ *
+ * Return: error code
+ */
+int dsi_display_update_transfer_time(void *display, u32 transfer_time);
+
 #endif /* _DSI_DISPLAY_H_ */

+ 75 - 49
msm/dsi/dsi_drm.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -395,6 +396,61 @@ static void dsi_bridge_mode_set(struct drm_bridge *bridge,
 	DSI_DEBUG("clk_rate: %llu\n", c_bridge->dsi_mode.timing.clk_rate_hz);
 }
 
+static bool _dsi_bridge_mode_validate_and_fixup(struct drm_bridge *bridge,
+		struct drm_crtc_state *crtc_state, struct dsi_display *display,
+		struct dsi_display_mode *adj_mode)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+	struct dsi_display_mode cur_dsi_mode;
+	struct sde_connector_state *old_conn_state;
+	struct drm_display_mode *cur_mode;
+
+	if (!bridge->encoder || !bridge->encoder->crtc || !crtc_state->crtc)
+		return 0;
+
+	cur_mode = &crtc_state->crtc->state->mode;
+	old_conn_state = to_sde_connector_state(display->drm_conn->state);
+
+	convert_to_dsi_mode(cur_mode, &cur_dsi_mode);
+	msm_parse_mode_priv_info(&old_conn_state->msm_mode, &cur_dsi_mode);
+
+	rc = dsi_display_validate_mode_change(c_bridge->display, &cur_dsi_mode, adj_mode);
+	if (rc) {
+		DSI_ERR("[%s] seamless mode mismatch failure rc=%d\n", c_bridge->display->name, rc);
+		return rc;
+	}
+
+	/*
+	 * DMS Flag if set during active changed condition cannot be
+	 * treated as seamless. Hence, removing DMS flag in such cases.
+	 */
+	if ((adj_mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) &&
+			crtc_state->active_changed)
+		adj_mode->dsi_mode_flags &= ~DSI_MODE_FLAG_DMS;
+
+	/* No DMS/VRR when drm pipeline is changing */
+	if (!dsi_display_mode_match(&cur_dsi_mode, adj_mode,
+		DSI_MODE_MATCH_FULL_TIMINGS) &&
+		(!(adj_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
+		(!(adj_mode->dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
+		(!(adj_mode->dsi_mode_flags & DSI_MODE_FLAG_POMS_TO_VID)) &&
+		(!(adj_mode->dsi_mode_flags & DSI_MODE_FLAG_POMS_TO_CMD)) &&
+		(!crtc_state->active_changed ||
+		 display->is_cont_splash_enabled)) {
+		adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+
+		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
+			adj_mode->timing.h_active,
+			adj_mode->timing.v_active,
+			adj_mode->timing.refresh_rate,
+			adj_mode->pixel_clk_khz,
+			adj_mode->panel_mode_caps);
+	}
+
+	return rc;
+}
+
 static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 				  const struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
@@ -402,10 +458,10 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 	int rc = 0;
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
 	struct dsi_display *display;
-	struct dsi_display_mode dsi_mode, cur_dsi_mode, *panel_dsi_mode;
+	struct dsi_display_mode dsi_mode, *panel_dsi_mode;
 	struct drm_crtc_state *crtc_state;
 	struct drm_connector_state *drm_conn_state;
-	struct sde_connector_state *conn_state, *old_conn_state;
+	struct sde_connector_state *conn_state;
 	struct msm_sub_mode new_sub_mode;
 
 	crtc_state = container_of(mode, struct drm_crtc_state, mode);
@@ -481,49 +537,10 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
 		return false;
 	}
 
-	if (bridge->encoder && bridge->encoder->crtc &&
-			crtc_state->crtc) {
-		const struct drm_display_mode *cur_mode =
-				&crtc_state->crtc->state->mode;
-		old_conn_state = to_sde_connector_state(display->drm_conn->state);
-
-		convert_to_dsi_mode(cur_mode, &cur_dsi_mode);
-		msm_parse_mode_priv_info(&old_conn_state->msm_mode, &cur_dsi_mode);
-
-		rc = dsi_display_validate_mode_change(c_bridge->display,
-					&cur_dsi_mode, &dsi_mode);
-		if (rc) {
-			DSI_ERR("[%s] seamless mode mismatch failure rc=%d\n",
-				c_bridge->display->name, rc);
-			return false;
-		}
-
-		/*
-		 * DMS Flag if set during active changed condition cannot be
-		 * treated as seamless. Hence, removing DMS flag in such cases.
-		 */
-		if ((dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DMS) &&
-				crtc_state->active_changed)
-			dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DMS;
-
-		/* No DMS/VRR when drm pipeline is changing */
-		if (!dsi_display_mode_match(&cur_dsi_mode, &dsi_mode,
-			DSI_MODE_MATCH_FULL_TIMINGS) &&
-			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR)) &&
-			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK)) &&
-			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS_TO_VID)) &&
-			(!(dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_POMS_TO_CMD)) &&
-			(!crtc_state->active_changed ||
-			 display->is_cont_splash_enabled)) {
-			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
-
-			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2,
-				dsi_mode.timing.h_active,
-				dsi_mode.timing.v_active,
-				dsi_mode.timing.refresh_rate,
-				dsi_mode.pixel_clk_khz,
-				dsi_mode.panel_mode_caps);
-		}
+	rc = _dsi_bridge_mode_validate_and_fixup(bridge, crtc_state, display, &dsi_mode);
+	if (rc) {
+		DSI_ERR("[%s] failed to validate dsi bridge mode.\n", display->name);
+		return false;
 	}
 
 	/* Reject seamless transition when active changed */
@@ -604,7 +621,7 @@ int dsi_conn_get_mode_info(struct drm_connector *connector,
 
 	convert_to_dsi_mode(drm_mode, &partial_dsi_mode);
 	rc = dsi_display_find_mode(dsi_display, &partial_dsi_mode, sub_mode, &dsi_mode);
-	if (rc || !dsi_mode->priv_info)
+	if (rc || !dsi_mode->priv_info || !dsi_display || !dsi_display->panel)
 		return -EINVAL;
 
 	memset(mode_info, 0, sizeof(*mode_info));
@@ -617,10 +634,18 @@ int dsi_conn_get_mode_info(struct drm_connector *connector,
 	mode_info->jitter_denom = dsi_mode->priv_info->panel_jitter_denom;
 	mode_info->dfps_maxfps = dsi_drm_get_dfps_maxfps(display);
 	mode_info->panel_mode_caps = dsi_mode->panel_mode_caps;
-	mode_info->mdp_transfer_time_us =
-		dsi_mode->priv_info->mdp_transfer_time_us;
+	mode_info->mdp_transfer_time_us = dsi_mode->priv_info->mdp_transfer_time_us;
+	mode_info->mdp_transfer_time_us_min = dsi_mode->priv_info->mdp_transfer_time_us_min;
+	mode_info->mdp_transfer_time_us_max = dsi_mode->priv_info->mdp_transfer_time_us_max;
 	mode_info->disable_rsc_solver = dsi_mode->priv_info->disable_rsc_solver;
 	mode_info->qsync_min_fps = dsi_mode->timing.qsync_min_fps;
+	mode_info->wd_jitter = dsi_mode->priv_info->wd_jitter;
+
+	mode_info->vpadding = dsi_display->panel->host_config.vpadding;
+	if (mode_info->vpadding < drm_mode->vdisplay) {
+		mode_info->vpadding = 0;
+		dsi_display->panel->host_config.line_insertion_enable = 0;
+	}
 
 	memcpy(&mode_info->topology, &dsi_mode->priv_info->topology,
 			sizeof(struct msm_display_topology));
@@ -1367,7 +1392,8 @@ struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
 	bridge->base.funcs = &dsi_bridge_ops;
 	bridge->base.encoder = encoder;
 
-	rc = drm_bridge_attach(encoder, &bridge->base, NULL, 0);
+	rc = drm_bridge_attach(encoder, &bridge->base, NULL,
+				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 	if (rc) {
 		DSI_ERR("failed to attach bridge, rc=%d\n", rc);
 		goto error_free_bridge;

+ 102 - 14
msm/dsi/dsi_panel.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -781,6 +782,7 @@ static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
 	u64 tmp64 = 0;
 	struct dsi_display_mode *display_mode;
 	struct dsi_display_mode_priv_info *priv_info;
+	u32 usecs_fps = 0;
 
 	display_mode = container_of(mode, struct dsi_display_mode, timing);
 
@@ -803,11 +805,22 @@ static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
 
 	rc = utils->read_u32(utils->data, "qcom,mdss-mdp-transfer-time-us",
 				&mode->mdp_transfer_time_us);
-	if (!rc)
-		display_mode->priv_info->mdp_transfer_time_us =
-			mode->mdp_transfer_time_us;
-	else
-		display_mode->priv_info->mdp_transfer_time_us = 0;
+	if (rc)
+		mode->mdp_transfer_time_us = 0;
+
+	rc = utils->read_u32(utils->data, "qcom,mdss-mdp-transfer-time-us-min",
+				&priv_info->mdp_transfer_time_us_min);
+	if (rc)
+		priv_info->mdp_transfer_time_us_min = 0;
+	else if (!rc && mode->mdp_transfer_time_us < priv_info->mdp_transfer_time_us_min)
+		mode->mdp_transfer_time_us = priv_info->mdp_transfer_time_us_min;
+
+	rc = utils->read_u32(utils->data, "qcom,mdss-mdp-transfer-time-us-max",
+				&priv_info->mdp_transfer_time_us_max);
+	if (rc)
+		priv_info->mdp_transfer_time_us_max = 0;
+	else if (!rc && mode->mdp_transfer_time_us > priv_info->mdp_transfer_time_us_max)
+		mode->mdp_transfer_time_us = priv_info->mdp_transfer_time_us_max;
 
 	priv_info->disable_rsc_solver = utils->read_bool(utils->data, "qcom,disable-rsc-solver");
 
@@ -820,6 +833,11 @@ static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
 		goto error;
 	}
 
+	usecs_fps = DIV_ROUND_UP((1 * 1000 * 1000), mode->refresh_rate);
+	if (mode->mdp_transfer_time_us > usecs_fps)
+		mode->mdp_transfer_time_us = 0;
+	priv_info->mdp_transfer_time_us = mode->mdp_transfer_time_us;
+
 	rc = utils->read_u32(utils->data, "qcom,mdss-dsi-panel-width",
 				  &mode->h_active);
 	if (rc) {
@@ -945,6 +963,9 @@ static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
 	case 18:
 		fmt = DSI_PIXEL_FORMAT_RGB666;
 		break;
+	case 30:
+		fmt = DSI_PIXEL_FORMAT_RGB101010;
+		break;
 	case 24:
 	default:
 		fmt = DSI_PIXEL_FORMAT_RGB888;
@@ -1158,7 +1179,8 @@ static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
 		host->dma_sched_window = 0;
 	else
 		host->dma_sched_window = window;
-
+	rc = utils->read_u32(utils->data, "qcom,vert-padding-value", &host->vpadding);
+	host->line_insertion_enable = (rc || host->vpadding <= 0) ? false : true;
 	DSI_DEBUG("[%s] DMA scheduling parameters Line: %d Window: %d\n", name,
 			host->dma_sched_line, host->dma_sched_window);
 	return 0;
@@ -1326,8 +1348,10 @@ static int dsi_panel_parse_qsync_caps(struct dsi_panel *panel,
 	 */
 	qsync_caps->qsync_min_fps_list_len = utils->count_u32_elems(utils->data,
 				  "qcom,dsi-supported-qsync-min-fps-list");
-	if (qsync_caps->qsync_min_fps_list_len < 1)
+	if (qsync_caps->qsync_min_fps_list_len < 1) {
+		qsync_caps->qsync_min_fps_list_len = 0;
 		goto qsync_support;
+	}
 
 	/**
 	 * qcom,dsi-supported-qsync-min-fps-list cannot be defined
@@ -1763,6 +1787,9 @@ static int dsi_panel_parse_panel_mode(struct dsi_panel *panel)
 					"qcom,poms-align-panel-vsync");
 	panel->panel_mode = panel_mode;
 	panel->panel_mode_switch_enabled = panel_mode_switch_enabled;
+
+	panel->panel_ack_disabled = utils->read_bool(utils->data,
+					"qcom,panel-ack-disabled");
 error:
 	return rc;
 }
@@ -2197,16 +2224,71 @@ static int dsi_panel_parse_misc_features(struct dsi_panel *panel)
 	return 0;
 }
 
+static int dsi_panel_parse_wd_jitter_config(struct dsi_display_mode_priv_info *priv_info,
+		struct dsi_parser_utils *utils, u32 *jitter)
+{
+	int rc = 0;
+	struct msm_display_wd_jitter_config *wd_jitter = &priv_info->wd_jitter;
+	u32 ltj[DEFAULT_PANEL_JITTER_ARRAY_SIZE] = {0, 1};
+	u32 ltj_time = 0;
+	const u32 max_ltj = 10;
+
+	if (!(utils->read_bool(utils->data, "qcom,dsi-wd-jitter-enable"))) {
+		priv_info->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+		priv_info->panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
+		return 0;
+	}
+
+	rc = utils->read_u32_array(utils->data, "qcom,dsi-wd-ltj-max-jitter", ltj,
+			DEFAULT_PANEL_JITTER_ARRAY_SIZE);
+	rc |= utils->read_u32(utils->data, "qcom,dsi-wd-ltj-time-sec", &ltj_time);
+	if (rc || !ltj[1] || !ltj_time || (ltj[0] / ltj[1] >= max_ltj)) {
+		DSI_DEBUG("No valid long term jitter defined\n");
+		priv_info->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+		priv_info->panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
+		rc = -EINVAL;
+	} else {
+		wd_jitter->ltj_max_numer = ltj[0];
+		wd_jitter->ltj_max_denom = ltj[1];
+		wd_jitter->ltj_time_sec = ltj_time;
+		wd_jitter->jitter_type = MSM_DISPLAY_WD_LTJ_JITTER;
+	}
+
+	if (jitter[0] && jitter[1]) {
+		if (jitter[0] / jitter[1] > MAX_PANEL_JITTER) {
+			wd_jitter->inst_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+			wd_jitter->inst_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
+		} else {
+			wd_jitter->inst_jitter_numer = jitter[0];
+			wd_jitter->inst_jitter_denom = jitter[1];
+		}
+		wd_jitter->jitter_type |= MSM_DISPLAY_WD_INSTANTANEOUS_JITTER;
+	} else if (rc) {
+		wd_jitter->inst_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
+		wd_jitter->inst_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
+		wd_jitter->jitter_type |= MSM_DISPLAY_WD_INSTANTANEOUS_JITTER;
+	}
+
+	priv_info->panel_jitter_numer = rc ?
+			wd_jitter->inst_jitter_numer : wd_jitter->ltj_max_numer;
+	priv_info->panel_jitter_denom = rc ?
+			wd_jitter->inst_jitter_denom : wd_jitter->ltj_max_denom;
+
+	return 0;
+}
+
 static int dsi_panel_parse_jitter_config(
 				struct dsi_display_mode *mode,
 				struct dsi_parser_utils *utils)
 {
 	int rc;
 	struct dsi_display_mode_priv_info *priv_info;
+	struct dsi_panel *panel;
 	u32 jitter[DEFAULT_PANEL_JITTER_ARRAY_SIZE] = {0, 0};
 	u64 jitter_val = 0;
 
 	priv_info = mode->priv_info;
+	panel = container_of(utils, struct dsi_panel, utils);
 
 	rc = utils->read_u32_array(utils->data, "qcom,mdss-dsi-panel-jitter",
 				jitter, DEFAULT_PANEL_JITTER_ARRAY_SIZE);
@@ -2217,10 +2299,11 @@ static int dsi_panel_parse_jitter_config(
 		jitter_val = div_u64(jitter_val, jitter[1]);
 	}
 
-	if (rc || !jitter_val || (jitter_val > MAX_PANEL_JITTER)) {
+	if (panel->te_using_watchdog_timer) {
+		dsi_panel_parse_wd_jitter_config(priv_info, utils, jitter);
+	} else if (rc || !jitter_val || (jitter_val > MAX_PANEL_JITTER)) {
 		priv_info->panel_jitter_numer = DEFAULT_PANEL_JITTER_NUMERATOR;
-		priv_info->panel_jitter_denom =
-					DEFAULT_PANEL_JITTER_DENOMINATOR;
+		priv_info->panel_jitter_denom = DEFAULT_PANEL_JITTER_DENOMINATOR;
 	} else {
 		priv_info->panel_jitter_numer = jitter[0];
 		priv_info->panel_jitter_denom = jitter[1];
@@ -3935,6 +4018,7 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 	struct dsi_mode_info *timing = &mode->timing;
 	struct dsi_display_mode *display_mode;
 	u32 jitter_numer, jitter_denom, prefill_lines;
+	u32 default_prefill_lines, actual_prefill_lines, vtotal;
 	u32 min_threshold_us, prefill_time_us, max_transfer_us, packet_overhead;
 	u16 bpp;
 
@@ -3998,11 +4082,15 @@ void dsi_panel_calc_dsi_transfer_time(struct dsi_host_common_cfg *config,
 	 * Increase the prefill_lines proportionately as recommended
 	 * 40lines for 60fps, 60 for 90fps, 120lines for 120fps, and so on.
 	 */
-	prefill_lines = mult_frac(MIN_PREFILL_LINES,
-			timing->refresh_rate, 60);
+	default_prefill_lines = mult_frac(MIN_PREFILL_LINES, timing->refresh_rate, 60);
+
+	actual_prefill_lines = timing->v_back_porch + timing->v_front_porch + timing->v_sync_width;
+	vtotal = actual_prefill_lines + timing->v_active;
+
+	/* consider the max of default prefill lines and actual prefill lines */
+	prefill_lines = max(actual_prefill_lines, default_prefill_lines);
 
-	prefill_time_us = mult_frac(frame_time_us, prefill_lines,
-			(timing->v_active));
+	prefill_time_us = mult_frac(frame_time_us, prefill_lines, vtotal);
 
 	min_threshold_us = min_threshold_us + prefill_time_us;
 

+ 2 - 0
msm/dsi/dsi_panel.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -215,6 +216,7 @@ struct dsi_panel {
 	const char *type;
 	struct device_node *panel_of_node;
 	struct mipi_dsi_device mipi_device;
+	bool panel_ack_disabled;
 
 	struct mutex panel_lock;
 	struct drm_panel drm_panel;

+ 2 - 1
msm/dsi/dsi_parser.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -10,7 +11,7 @@
 #include <linux/of_gpio.h>
 #include <linux/version.h>
 
-#ifdef CONFIG_DSI_PARSER
+#if IS_ENABLED(CONFIG_DSI_PARSER)
 void *dsi_parser_get(struct device *dev);
 void dsi_parser_put(void *data);
 int dsi_parser_dbg_init(void *parser, struct dentry *dir);

+ 11 - 0
msm/dsi/dsi_phy.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/of_device.h>
@@ -71,6 +72,14 @@ static const struct dsi_ver_spec_info dsi_phy_v4_3 = {
 	.timing_cfg_count = 14,
 };
 
+static const struct dsi_ver_spec_info dsi_phy_v4_3_2 = {
+	.version = DSI_PHY_VERSION_4_3_2,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 0,
+	.timing_cfg_count = 14,
+};
+
 static const struct dsi_ver_spec_info dsi_phy_v5_2 = {
 	.version = DSI_PHY_VERSION_5_2,
 	.lane_cfg_count = 4,
@@ -90,6 +99,8 @@ static const struct of_device_id msm_dsi_phy_of_match[] = {
 	  .data = &dsi_phy_v4_2,},
 	{ .compatible = "qcom,dsi-phy-v4.3",
 	  .data = &dsi_phy_v4_3,},
+	{ .compatible = "qcom,dsi-phy-v4.3.2",
+	  .data = &dsi_phy_v4_3_2,},
 	{ .compatible = "qcom,dsi-phy-v5.2",
 	  .data = &dsi_phy_v5_2,},
 	{}

+ 5 - 0
msm/dsi/dsi_phy_hw.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _DSI_PHY_HW_H_
@@ -34,6 +35,7 @@
  * @DSI_PHY_VERSION_4_1:	7nm
  * @DSI_PHY_VERSION_4_2:        5nm
  * @DSI_PHY_VERSION_4_3:        5nm
+ * @DSI_PHY_VERSION_4_3_2:	4nm (v4.3 specific to SM8475)
  * @DSI_PHY_VERSION_5_2:        4nm
  * @DSI_PHY_VERSION_MAX:
  */
@@ -44,17 +46,20 @@ enum dsi_phy_version {
 	DSI_PHY_VERSION_4_1, /* 7nm */
 	DSI_PHY_VERSION_4_2, /* 5nm */
 	DSI_PHY_VERSION_4_3, /* 5nm */
+	DSI_PHY_VERSION_4_3_2, /* 4nm */
 	DSI_PHY_VERSION_5_2, /* 4nm */
 	DSI_PHY_VERSION_MAX
 };
 
 /**
  * enum dsi_pll_version - DSI PHY PLL version enumeration
+ * @DSI_PLL_VERSION_4NM:        4nm PLL
  * @DSI_PLL_VERSION_5NM:        5nm PLL
  * @DSI_PLL_VERSION_10NM:	10nm PLL
  * @DSI_PLL_VERSION_UNKNOWN:	Unknown PLL version
  */
 enum dsi_pll_version {
+	DSI_PLL_VERSION_4NM,
 	DSI_PLL_VERSION_5NM,
 	DSI_PLL_VERSION_10NM,
 	DSI_PLL_VERSION_UNKNOWN

+ 16 - 3
msm/dsi/dsi_phy_hw_v4_0.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/math64.h>
@@ -247,6 +248,7 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 	u32 minor_ver = 0;
 	/* For C-PHY, no low power settings for lower clk rate */
 	u32 vreg_ctrl_0 = 0x51;
+	u32 vreg_ctrl_1 = 0x55;
 	u32 glbl_str_swi_cal_sel_ctrl = 0;
 	u32 glbl_hstx_str_ctrl_0 = 0;
 	u32 glbl_rescode_top_ctrl = 0;
@@ -272,6 +274,11 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 		glbl_rescode_bot_ctrl = 0x3c;
 	}
 
+	if (phy->version == DSI_PHY_VERSION_4_3_2) {
+		vreg_ctrl_0 = 0x45;
+		vreg_ctrl_1 = 0x41;
+	}
+
 	/* de-assert digital and pll power down */
 	data = BIT(6) | BIT(5);
 	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
@@ -295,7 +302,7 @@ static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy,
 
 	/* Enable LDO */
 	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x55);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
 					glbl_str_swi_cal_sel_ctrl);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
@@ -356,6 +363,7 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 	u32 minor_ver = 0;
 	bool less_than_1500_mhz = false;
 	u32 vreg_ctrl_0 = 0;
+	u32 vreg_ctrl_1 = 0x5c;
 	u32 glbl_str_swi_cal_sel_ctrl = 0;
 	u32 glbl_hstx_str_ctrl_0 = 0;
 	u32 glbl_rescode_top_ctrl = 0;
@@ -390,6 +398,11 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 	if (phy->version >= DSI_PHY_VERSION_4_3)
 		glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
 
+	if (phy->version == DSI_PHY_VERSION_4_3_2){
+		vreg_ctrl_0 = 0x44;
+		vreg_ctrl_1 = 0x19;
+	}
+
 	split_link_enabled = cfg->split_link.enabled;
 	lanes_per_sublink = cfg->split_link.lanes_per_sublink;
 	/* de-assert digital and pll power down */
@@ -418,7 +431,7 @@ static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy,
 
 	/* Enable LDO */
 	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
-	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x5c);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
 	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x00);
 	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
 					glbl_str_swi_cal_sel_ctrl);
@@ -491,7 +504,7 @@ void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
 		pr_warn("PLL turned on before configuring PHY\n");
 
 	/* Request for REFGEN ready */
-	if (phy->version == DSI_PHY_VERSION_4_3) {
+	if (phy->version >= DSI_PHY_VERSION_4_3) {
 		DSI_W32(phy, DSIPHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
 		udelay(500);
 	}

+ 889 - 0
msm/dsi/dsi_phy_hw_v5_0.c

@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include "dsi_hw.h"
+#include "dsi_defs.h"
+#include "dsi_phy_hw.h"
+#include "dsi_catalog.h"
+
+#define DSIPHY_CMN_REVISION_ID0                                   0x000
+#define DSIPHY_CMN_REVISION_ID1                                   0x004
+#define DSIPHY_CMN_REVISION_ID2                                   0x008
+#define DSIPHY_CMN_REVISION_ID3                                   0x00C
+#define DSIPHY_CMN_CLK_CFG0                                       0x010
+#define DSIPHY_CMN_CLK_CFG1                                       0x014
+#define DSIPHY_CMN_GLBL_CTRL                                      0x018
+#define DSIPHY_CMN_RBUF_CTRL                                      0x01C
+#define DSIPHY_CMN_VREG_CTRL_0                                    0x020
+#define DSIPHY_CMN_CTRL_0                                         0x024
+#define DSIPHY_CMN_CTRL_1                                         0x028
+#define DSIPHY_CMN_CTRL_2                                         0x02C
+#define DSIPHY_CMN_CTRL_3                                         0x030
+#define DSIPHY_CMN_LANE_CFG0                                      0x034
+#define DSIPHY_CMN_LANE_CFG1                                      0x038
+#define DSIPHY_CMN_PLL_CNTRL                                      0x03C
+#define DSIPHY_CMN_DPHY_SOT                                       0x040
+#define DSIPHY_CMN_LANE_CTRL0                                     0x0A0
+#define DSIPHY_CMN_LANE_CTRL1                                     0x0A4
+#define DSIPHY_CMN_LANE_CTRL2                                     0x0A8
+#define DSIPHY_CMN_LANE_CTRL3                                     0x0AC
+#define DSIPHY_CMN_LANE_CTRL4                                     0x0B0
+#define DSIPHY_CMN_TIMING_CTRL_0                                  0x0B4
+#define DSIPHY_CMN_TIMING_CTRL_1                                  0x0B8
+#define DSIPHY_CMN_TIMING_CTRL_2                                  0x0Bc
+#define DSIPHY_CMN_TIMING_CTRL_3                                  0x0C0
+#define DSIPHY_CMN_TIMING_CTRL_4                                  0x0C4
+#define DSIPHY_CMN_TIMING_CTRL_5                                  0x0C8
+#define DSIPHY_CMN_TIMING_CTRL_6                                  0x0CC
+#define DSIPHY_CMN_TIMING_CTRL_7                                  0x0D0
+#define DSIPHY_CMN_TIMING_CTRL_8                                  0x0D4
+#define DSIPHY_CMN_TIMING_CTRL_9                                  0x0D8
+#define DSIPHY_CMN_TIMING_CTRL_10                                 0x0DC
+#define DSIPHY_CMN_TIMING_CTRL_11                                 0x0E0
+#define DSIPHY_CMN_TIMING_CTRL_12                                 0x0E4
+#define DSIPHY_CMN_TIMING_CTRL_13                                 0x0E8
+#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0                           0x0EC
+#define DSIPHY_CMN_GLBL_HSTX_STR_CTRL_1                           0x0F0
+#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL                   0x0F4
+#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL                   0x0F8
+#define DSIPHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL                   0x0FC
+#define DSIPHY_CMN_GLBL_LPTX_STR_CTRL                             0x100
+#define DSIPHY_CMN_GLBL_PEMPH_CTRL_0                              0x104
+#define DSIPHY_CMN_GLBL_PEMPH_CTRL_1                              0x108
+#define DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL                      0x10C
+#define DSIPHY_CMN_VREG_CTRL_1                                    0x110
+#define DSIPHY_CMN_CTRL_4                                         0x114
+#define DSIPHY_CMN_PHY_STATUS                                     0x140
+#define DSIPHY_CMN_LANE_STATUS0                                   0x148
+#define DSIPHY_CMN_LANE_STATUS1                                   0x14C
+#define DSIPHY_CMN_GLBL_DIGTOP_SPARE10                            0x1AC
+#define DSIPHY_CMN_SL_DSI_LANE_CTRL1                              0x1B4
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_LNX_CFG0(n)                         (0x200 + (0x80 * (n)))
+#define DSIPHY_LNX_CFG1(n)                         (0x204 + (0x80 * (n)))
+#define DSIPHY_LNX_CFG2(n)                         (0x208 + (0x80 * (n)))
+#define DSIPHY_LNX_TEST_DATAPATH(n)                (0x20C + (0x80 * (n)))
+#define DSIPHY_LNX_PIN_SWAP(n)                     (0x210 + (0x80 * (n)))
+#define DSIPHY_LNX_LPRX_CTRL(n)                    (0x214 + (0x80 * (n)))
+#define DSIPHY_LNX_TX_DCTRL(n)                     (0x218 + (0x80 * (n)))
+
+/* dynamic refresh control registers */
+#define DSI_DYN_REFRESH_CTRL                   (0x000)
+#define DSI_DYN_REFRESH_PIPE_DELAY             (0x004)
+#define DSI_DYN_REFRESH_PIPE_DELAY2            (0x008)
+#define DSI_DYN_REFRESH_PLL_DELAY              (0x00C)
+#define DSI_DYN_REFRESH_STATUS                 (0x010)
+#define DSI_DYN_REFRESH_PLL_CTRL0              (0x014)
+#define DSI_DYN_REFRESH_PLL_CTRL1              (0x018)
+#define DSI_DYN_REFRESH_PLL_CTRL2              (0x01C)
+#define DSI_DYN_REFRESH_PLL_CTRL3              (0x020)
+#define DSI_DYN_REFRESH_PLL_CTRL4              (0x024)
+#define DSI_DYN_REFRESH_PLL_CTRL5              (0x028)
+#define DSI_DYN_REFRESH_PLL_CTRL6              (0x02C)
+#define DSI_DYN_REFRESH_PLL_CTRL7              (0x030)
+#define DSI_DYN_REFRESH_PLL_CTRL8              (0x034)
+#define DSI_DYN_REFRESH_PLL_CTRL9              (0x038)
+#define DSI_DYN_REFRESH_PLL_CTRL10             (0x03C)
+#define DSI_DYN_REFRESH_PLL_CTRL11             (0x040)
+#define DSI_DYN_REFRESH_PLL_CTRL12             (0x044)
+#define DSI_DYN_REFRESH_PLL_CTRL13             (0x048)
+#define DSI_DYN_REFRESH_PLL_CTRL14             (0x04C)
+#define DSI_DYN_REFRESH_PLL_CTRL15             (0x050)
+#define DSI_DYN_REFRESH_PLL_CTRL16             (0x054)
+#define DSI_DYN_REFRESH_PLL_CTRL17             (0x058)
+#define DSI_DYN_REFRESH_PLL_CTRL18             (0x05C)
+#define DSI_DYN_REFRESH_PLL_CTRL19             (0x060)
+#define DSI_DYN_REFRESH_PLL_CTRL20             (0x064)
+#define DSI_DYN_REFRESH_PLL_CTRL21             (0x068)
+#define DSI_DYN_REFRESH_PLL_CTRL22             (0x06C)
+#define DSI_DYN_REFRESH_PLL_CTRL23             (0x070)
+#define DSI_DYN_REFRESH_PLL_CTRL24             (0x074)
+#define DSI_DYN_REFRESH_PLL_CTRL25             (0x078)
+#define DSI_DYN_REFRESH_PLL_CTRL26             (0x07C)
+#define DSI_DYN_REFRESH_PLL_CTRL27             (0x080)
+#define DSI_DYN_REFRESH_PLL_CTRL28             (0x084)
+#define DSI_DYN_REFRESH_PLL_CTRL29             (0x088)
+#define DSI_DYN_REFRESH_PLL_CTRL30             (0x08C)
+#define DSI_DYN_REFRESH_PLL_CTRL31             (0x090)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR         (0x094)
+#define DSI_DYN_REFRESH_PLL_UPPER_ADDR2        (0x098)
+
+static int dsi_phy_hw_v5_0_is_pll_on(struct dsi_phy_hw *phy)
+{
+	u32 data = 0;
+
+	data = DSI_R32(phy, DSIPHY_CMN_PLL_CNTRL);
+	mb(); /*make sure read happened */
+	return (data & BIT(0));
+}
+
+static bool dsi_phy_hw_v5_0_is_split_link_enabled(struct dsi_phy_hw *phy)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(phy, DSIPHY_CMN_GLBL_CTRL);
+	mb(); /*make sure read happened */
+	return (reg & BIT(5));
+}
+
+static void dsi_phy_hw_v5_0_config_lpcdrx(struct dsi_phy_hw *phy,
+	struct dsi_phy_cfg *cfg, bool enable)
+{
+	int phy_lane_0 = dsi_phy_conv_logical_to_phy_lane(&cfg->lane_map, DSI_LOGICAL_LANE_0);
+
+	/*
+	 * LPRX and CDRX need to enabled only for physical data lane
+	 * corresponding to the logical data lane 0
+	 */
+
+	if (enable)
+		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), cfg->strength.lane[phy_lane_0][1]);
+	else
+		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v5_0_lane_swap_config(struct dsi_phy_hw *phy,
+		struct dsi_lane_map *lane_map)
+{
+	DSI_W32(phy, DSIPHY_CMN_LANE_CFG0,
+		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_0] |
+		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_1] << 4)));
+	DSI_W32(phy, DSIPHY_CMN_LANE_CFG1,
+		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_2] |
+		(lane_map->lane_map_v2[DSI_LOGICAL_LANE_3] << 4)));
+}
+
+static void dsi_phy_hw_v5_0_lane_settings(struct dsi_phy_hw *phy,
+			    struct dsi_phy_cfg *cfg)
+{
+	int i;
+	u8 tx_dctrl[] = {0x40, 0x40, 0x40, 0x46, 0x41};
+	bool split_link_enabled;
+	u32 lanes_per_sublink;
+
+	split_link_enabled = cfg->split_link.enabled;
+	lanes_per_sublink = cfg->split_link.lanes_per_sublink;
+
+	/* Strength ctrl settings */
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		/*
+		 * Disable LPRX and CDRX for all lanes. And later on, it will
+		 * be only enabled for the physical data lane corresponding
+		 * to the logical data lane 0
+		 */
+		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
+		DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
+	}
+	dsi_phy_hw_v5_0_config_lpcdrx(phy, cfg, true);
+
+	/* other settings */
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		DSI_W32(phy, DSIPHY_LNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+		DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+		DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
+	}
+
+	/* remove below check if cphy splitlink is enabled */
+	if (split_link_enabled && (cfg->phy_type == DSI_PHY_TYPE_CPHY))
+		return;
+
+	/* Configure the splitlink clock lane with clk lane settings */
+	if (split_link_enabled) {
+		DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(5), 0x0);
+		DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(5), 0x0);
+		DSI_W32(phy, DSIPHY_LNX_CFG0(5), cfg->lanecfg.lane[4][0]);
+		DSI_W32(phy, DSIPHY_LNX_CFG1(5), cfg->lanecfg.lane[4][1]);
+		DSI_W32(phy, DSIPHY_LNX_CFG2(5), cfg->lanecfg.lane[4][2]);
+		DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(5), tx_dctrl[4]);
+	}
+}
+
+void dsi_phy_hw_v5_0_commit_phy_timing(struct dsi_phy_hw *phy,
+		struct dsi_phy_per_lane_cfgs *timing)
+{
+	/* Commit DSI PHY timings */
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_1, timing->lane_v4[1]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_2, timing->lane_v4[2]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_3, timing->lane_v4[3]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_12, timing->lane_v4[12]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_13, timing->lane_v4[13]);
+}
+
+/**
+ * cphy_enable() - Enable CPHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ */
+static void dsi_phy_hw_cphy_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
+{
+	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+	u32 data;
+	/* For C-PHY, no low power settings for lower clk rate */
+	u32 glbl_str_swi_cal_sel_ctrl = 0;
+	u32 glbl_hstx_str_ctrl_0 = 0;
+
+	/* de-assert digital and pll power down */
+	data = BIT(6) | BIT(5);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
+
+	/* Assert PLL core reset */
+	DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
+
+	/* turn off resync FIFO */
+	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
+
+	/* program CMN_CTRL_4 for minor_ver greater than 2 chipsets*/
+	DSI_W32(phy, DSIPHY_CMN_CTRL_4, 0x04);
+
+	/* Configure PHY lane swap */
+	dsi_phy_hw_v5_0_lane_swap_config(phy, &cfg->lane_map);
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_CTRL, BIT(6));
+
+	/* Enable LDO */
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, 0x45);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x41);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL, glbl_str_swi_cal_sel_ctrl);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_PEMPH_CTRL_0, 0x11);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL, 0x00);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL, 0x00);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+	/* Remove power down from all blocks */
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
+
+	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x17);
+
+	switch (cfg->pll_source) {
+	case DSI_PLL_SOURCE_STANDALONE:
+	case DSI_PLL_SOURCE_NATIVE:
+		data = 0x0; /* internal PLL */
+		break;
+	case DSI_PLL_SOURCE_NON_NATIVE:
+		data = 0x1; /* external PLL */
+		break;
+	default:
+		break;
+	}
+	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
+
+	/* DSI PHY timings */
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_0, timing->lane_v4[0]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_4, timing->lane_v4[4]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_5, timing->lane_v4[5]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_6, timing->lane_v4[6]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_7, timing->lane_v4[7]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_8, timing->lane_v4[8]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_9, timing->lane_v4[9]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_10, timing->lane_v4[10]);
+	DSI_W32(phy, DSIPHY_CMN_TIMING_CTRL_11, timing->lane_v4[11]);
+
+	/* DSI lane settings */
+	dsi_phy_hw_v5_0_lane_settings(phy, cfg);
+
+	DSI_PHY_DBG(phy, "C-Phy enabled\n");
+}
+
+/**
+ * dphy_enable() - Enable DPHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ */
+static void dsi_phy_hw_dphy_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
+{
+	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+	u32 data;
+	bool less_than_1500_mhz = false;
+	u32 vreg_ctrl_0 = 0;
+	u32 glbl_str_swi_cal_sel_ctrl = 0;
+	u32 glbl_hstx_str_ctrl_0 = 0;
+	u32 glbl_rescode_top_ctrl = 0;
+	u32 glbl_rescode_bot_ctrl = 0;
+	bool split_link_enabled;
+	u32 lanes_per_sublink;
+
+	/* Alter PHY configurations if data rate less than 1.5GHZ*/
+	if (cfg->bit_clk_rate_hz <= 1500000000)
+		less_than_1500_mhz = true;
+
+	vreg_ctrl_0 = 0x44;
+	glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+	glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+	glbl_str_swi_cal_sel_ctrl = 0x00;
+	glbl_hstx_str_ctrl_0 = 0x88;
+
+
+	split_link_enabled = cfg->split_link.enabled;
+	lanes_per_sublink = cfg->split_link.lanes_per_sublink;
+	/* de-assert digital and pll power down */
+	data = BIT(6) | BIT(5);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
+
+	if (split_link_enabled) {
+		data = DSI_R32(phy, DSIPHY_CMN_GLBL_CTRL);
+		/* set SPLIT_LINK_ENABLE in global control */
+		DSI_W32(phy, DSIPHY_CMN_GLBL_CTRL, (data | BIT(5)));
+	}
+	/* Assert PLL core reset */
+	DSI_W32(phy, DSIPHY_CMN_PLL_CNTRL, 0x00);
+
+	/* turn off resync FIFO */
+	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
+
+	/* program CMN_CTRL_4 for minor_ver greater than 2 chipsets*/
+	DSI_W32(phy, DSIPHY_CMN_CTRL_4, 0x04);
+
+	/* Configure PHY lane swap */
+	dsi_phy_hw_v5_0_lane_swap_config(phy, &cfg->lane_map);
+
+	/* Enable LDO */
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+	DSI_W32(phy, DSIPHY_CMN_VREG_CTRL_1, 0x19);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x00);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+					glbl_str_swi_cal_sel_ctrl);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_HSTX_STR_CTRL_0, glbl_hstx_str_ctrl_0);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+			glbl_rescode_top_ctrl);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+			glbl_rescode_bot_ctrl);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+	if (split_link_enabled) {
+		if (lanes_per_sublink == 1) {
+			/* remove Lane1 and Lane3 configs */
+			DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0xed);
+			DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x35);
+		} else {
+			/* enable all together with sublink clock */
+			DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0xff);
+			DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x3F);
+		}
+
+		DSI_W32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1, 0x03);
+	} else {
+		/* Remove power down from all blocks */
+		DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x7f);
+		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0x1F);
+	}
+
+	/* Select full-rate mode */
+	DSI_W32(phy, DSIPHY_CMN_CTRL_2, 0x40);
+
+	switch (cfg->pll_source) {
+	case DSI_PLL_SOURCE_STANDALONE:
+	case DSI_PLL_SOURCE_NATIVE:
+		data = 0x0; /* internal PLL */
+		break;
+	case DSI_PLL_SOURCE_NON_NATIVE:
+		data = 0x1; /* external PLL */
+		break;
+	default:
+		break;
+	}
+	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, (data << 2)); /* set PLL src */
+
+	/* DSI PHY timings */
+	dsi_phy_hw_v5_0_commit_phy_timing(phy, timing);
+
+	/* DSI lane settings */
+	dsi_phy_hw_v5_0_lane_settings(phy, cfg);
+
+	DSI_PHY_DBG(phy, "D-Phy enabled\n");
+}
+
/**
 * dsi_phy_hw_v5_0_enable() - Enable PHY hardware
 * @phy:      Pointer to DSI PHY hardware object.
 * @cfg:      Per lane configurations for timing, strength and lane
 *	      configurations.
 *
 * Votes for REFGEN, polls PHY_STATUS until REFGEN reports ready, then
 * dispatches to the C-PHY or D-PHY specific enable sequence based on
 * @cfg->phy_type. Aborts (with an error log) if REFGEN never comes up.
 */
void dsi_phy_hw_v5_0_enable(struct dsi_phy_hw *phy,
			    struct dsi_phy_cfg *cfg)
{
	int rc = 0;
	u32 status;
	u32 const delay_us = 5;
	u32 const timeout_us = 1000;

	/* PLL running before the PHY is configured is unexpected; warn only */
	if (dsi_phy_hw_v5_0_is_pll_on(phy))
		DSI_PHY_WARN(phy, "PLL turned on before configuring PHY\n");

	/* Request for REFGEN ready */
	DSI_W32(phy, DSIPHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
	udelay(500);

	/* wait for REFGEN READY (PHY_STATUS bit 0) */
	rc = DSI_READ_POLL_TIMEOUT_ATOMIC(phy, DSIPHY_CMN_PHY_STATUS,
		status, (status & BIT(0)), delay_us, timeout_us);
	if (rc) {
		DSI_PHY_ERR(phy, "Ref gen not ready. Aborting\n");
		return;
	}

	if (cfg->phy_type == DSI_PHY_TYPE_CPHY)
		dsi_phy_hw_cphy_enable(phy, cfg);
	else /* Default PHY type is DPHY */
		dsi_phy_hw_dphy_enable(phy, cfg);

}
+
/**
 * dsi_phy_hw_v5_0_disable() - Disable PHY hardware
 * @phy:      Pointer to DSI PHY hardware object.
 * @cfg:      Per lane configurations (used to tear down LP-RX/CD-RX).
 *
 * Turns off LP/CD receivers, drops the REFGEN vote, then disables all
 * lanes and powers down every PHY block.
 */
void dsi_phy_hw_v5_0_disable(struct dsi_phy_hw *phy,
			    struct dsi_phy_cfg *cfg)
{
	u32 data = 0;

	if (dsi_phy_hw_v5_0_is_pll_on(phy))
		DSI_PHY_WARN(phy, "Turning OFF PHY while PLL is on\n");

	/* disable LP-RX and CD-RX before shutting the PHY down */
	dsi_phy_hw_v5_0_config_lpcdrx(phy, cfg, false);

	/* Turn off REFGEN Vote */
	DSI_W32(phy, DSIPHY_CMN_GLBL_DIGTOP_SPARE10, 0x0);
	/* order the vote removal before the delay below */
	wmb();
	/* Delay to ensure HW removes vote before PHY shut down */
	udelay(2);

	data = DSI_R32(phy, DSIPHY_CMN_CTRL_0);
	/* disable all lanes and splitlink clk lane*/
	data &= ~0x9F;
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, data);
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL0, 0);

	/* Turn off all PHY blocks */
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0x00);
	/* make sure phy is turned off */
	wmb();
	DSI_PHY_DBG(phy, "Phy disabled\n");
}
+
/**
 * dsi_phy_hw_v5_0_toggle_resync_fifo() - Cycle the resync retime FIFO
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Turns the retime buffer off and back on so it resynchronizes; the
 * barriers guarantee the off write lands before the on write.
 */
void dsi_phy_hw_v5_0_toggle_resync_fifo(struct dsi_phy_hw *phy)
{
	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x00);
	/* ensure that the FIFO is off */
	wmb();
	DSI_W32(phy, DSIPHY_CMN_RBUF_CTRL, 0x1);
	/* ensure that the FIFO is toggled back on */
	wmb();
}
+
/**
 * dsi_phy_hw_v5_0_reset_clk_en_sel() - Clear CLK_EN_SEL in CLK_CFG1
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Must be called after the retime buffer has been resynced (see
 * dsi_phy_hw_v5_0_toggle_resync_fifo()).
 */
void dsi_phy_hw_v5_0_reset_clk_en_sel(struct dsi_phy_hw *phy)
{
	u32 data = 0;

	/*Turning off CLK_EN_SEL after retime buffer sync */
	data = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
	data &= ~BIT(4);
	DSI_W32(phy, DSIPHY_CMN_CLK_CFG1, data);
	/* ensure that clk_en_sel bit is turned off */
	wmb();
}
+
/**
 * dsi_phy_hw_v5_0_wait_for_lane_idle() - Poll lanes for stop state
 * @phy:   Pointer to DSI PHY hardware object.
 * @lanes: Bitmask of DSI_DATA_LANE_* flags to wait on.
 *
 * Builds a LANE_STATUS1 mask covering the clock lane (plus the sublink
 * clock lane when split link is enabled) and every requested data lane,
 * then polls until all masked bits are set.
 *
 * Return: 0 when all lanes reach stop state, poll-macro error on timeout.
 */
int dsi_phy_hw_v5_0_wait_for_lane_idle(
		struct dsi_phy_hw *phy, u32 lanes)
{
	int rc = 0, val = 0;
	u32 stop_state_mask = 0;
	u32 const sleep_us = 10;
	u32 const timeout_us = 100;
	bool split_link_enabled = dsi_phy_hw_v5_0_is_split_link_enabled(phy);

	stop_state_mask = BIT(4); /* clock lane */
	if (split_link_enabled)
		stop_state_mask |= BIT(5); /* sublink clock lane */
	if (lanes & DSI_DATA_LANE_0)
		stop_state_mask |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		stop_state_mask |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		stop_state_mask |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		stop_state_mask |= BIT(3);

	DSI_PHY_DBG(phy, "polling for lanes to be in stop state, mask=0x%08x\n", stop_state_mask);
	rc = DSI_READ_POLL_TIMEOUT(phy, DSIPHY_CMN_LANE_STATUS1, val,
				((val & stop_state_mask) == stop_state_mask),
				sleep_us, timeout_us);
	if (rc) {
		DSI_PHY_ERR(phy, "lanes not in stop state, LANE_STATUS=0x%08x\n", val);
		return rc;
	}

	return 0;
}
+
/**
 * dsi_phy_hw_v5_0_ulps_request() - Request ULPS entry for given lanes
 * @phy:   Pointer to DSI PHY hardware object.
 * @cfg:   PHY configuration (split link and force-HS-clock settings).
 * @lanes: Bitmask of DSI_CLOCK_LANE / DSI_DATA_LANE_* to enter ULPS.
 *
 * Builds the LANE_CTRL1 request mask, optionally keeps the clock lane
 * in HS, waits for the lanes to settle, then disables LP-RX/CD-RX.
 */
void dsi_phy_hw_v5_0_ulps_request(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg, u32 lanes)
{
	u32 reg = 0, sl_lane_ctrl1 = 0;

	if (lanes & DSI_CLOCK_LANE)
		reg = BIT(4);
	if (lanes & DSI_DATA_LANE_0)
		reg |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		reg |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		reg |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		reg |= BIT(3);
	if (cfg->split_link.enabled)
		reg |= BIT(7); /* sublink lanes follow the request too */

	if (cfg->force_clk_lane_hs) {
		/* keep clock lane in HS instead of letting it enter ULPS */
		reg |= BIT(5) | BIT(6);
		if (cfg->split_link.enabled) {
			sl_lane_ctrl1 = DSI_R32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1);
			sl_lane_ctrl1 |= BIT(2);
			DSI_W32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1, sl_lane_ctrl1);
		}
	}

	/*
	 * ULPS entry request. Wait for short time to make sure
	 * that the lanes enter ULPS. Recommended as per HPG.
	 */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
	usleep_range(100, 110);

	/* disable LPRX and CDRX */
	dsi_phy_hw_v5_0_config_lpcdrx(phy, cfg, false);

	DSI_PHY_DBG(phy, "ULPS requested for lanes 0x%x\n", lanes);
}
+
/**
 * dsi_phy_hw_v5_0_lane_reset() - Force-reset the PHY data/clock lanes
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Pulses the lane-reset bits in LANE_CTRL3 up to 10 times (200us apart)
 * until LANE_STATUS1 reads 0x1f, then de-asserts the reset.
 *
 * Return: always 0; an exhausted retry loop is only reported via a
 * debug log (matching the existing caller contract).
 */
int dsi_phy_hw_v5_0_lane_reset(struct dsi_phy_hw *phy)
{
	int ret = 0, loop = 10, u_dly = 200;
	u32 ln_status = 0;

	while ((ln_status != 0x1f) && loop) {
		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x1f);
		wmb(); /* ensure register is committed */
		loop--;
		udelay(u_dly);
		ln_status = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS1);
		DSI_PHY_DBG(phy, "trial no: %d\n", loop);
	}

	if (!loop)
		DSI_PHY_DBG(phy, "could not reset phy lanes\n");

	/* de-assert the lane reset regardless of the polling result */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0x0);
	wmb(); /* ensure register is committed */

	return ret;
}
+
/**
 * dsi_phy_hw_v5_0_ulps_exit() - Bring lanes out of ULPS
 * @phy:   Pointer to DSI PHY hardware object.
 * @cfg:   PHY configuration (split link and force-HS-clock settings).
 * @lanes: Bitmask of DSI_CLOCK_LANE / DSI_DATA_LANE_* leaving ULPS.
 *
 * Re-enables LP-RX/CD-RX, issues the ULPS exit request, clears the
 * request/exit flags, then forces the lanes into stop state to work
 * around lanes occasionally not reaching stop state on their own.
 */
void dsi_phy_hw_v5_0_ulps_exit(struct dsi_phy_hw *phy,
			struct dsi_phy_cfg *cfg, u32 lanes)
{
	u32 reg = 0, sl_lane_ctrl1 = 0;

	if (lanes & DSI_CLOCK_LANE)
		reg = BIT(4);
	if (lanes & DSI_DATA_LANE_0)
		reg |= BIT(0);
	if (lanes & DSI_DATA_LANE_1)
		reg |= BIT(1);
	if (lanes & DSI_DATA_LANE_2)
		reg |= BIT(2);
	if (lanes & DSI_DATA_LANE_3)
		reg |= BIT(3);
	if (cfg->split_link.enabled)
		reg |= BIT(5); /* sublink clock lane */

	/* enable LPRX and CDRX */
	dsi_phy_hw_v5_0_config_lpcdrx(phy, cfg, true);

	/* ULPS exit request */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, reg);
	usleep_range(1000, 1010);

	/* Clear ULPS request flags on all lanes */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, 0);
	/* Clear ULPS exit flags on all lanes */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL2, 0);

	/*
	 * Sometimes when exiting ULPS, it is possible that some DSI
	 * lanes are not in the stop state which could lead to DSI
	 * commands not going through. To avoid this, force the lanes
	 * to be in stop state.
	 */
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, reg);
	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL3, 0);
	usleep_range(100, 110);

	if (cfg->force_clk_lane_hs) {
		/* re-assert forced HS clock lane after the exit sequence */
		reg = BIT(5) | BIT(6);
		DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);
		if (cfg->split_link.enabled) {
			sl_lane_ctrl1 = DSI_R32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1);
			sl_lane_ctrl1 |= BIT(2);
			DSI_W32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1, sl_lane_ctrl1);
		}
	}
}
+
+u32 dsi_phy_hw_v5_0_get_lanes_in_ulps(struct dsi_phy_hw *phy)
+{
+	u32 lanes = 0;
+
+	lanes = DSI_R32(phy, DSIPHY_CMN_LANE_STATUS0);
+	DSI_PHY_DBG(phy, "lanes in ulps = 0x%x\n", lanes);
+	return lanes;
+}
+
+bool dsi_phy_hw_v5_0_is_lanes_in_ulps(u32 lanes, u32 ulps_lanes)
+{
+	if (lanes & ulps_lanes)
+		return false;
+
+	return true;
+}
+
+int dsi_phy_hw_timing_val_v5_0(struct dsi_phy_per_lane_cfgs *timing_cfg,
+		u32 *timing_val, u32 size)
+{
+	int i = 0;
+
+	if (size != DSI_PHY_TIMING_V4_SIZE) {
+		DSI_ERR("Unexpected timing array size %d\n", size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		timing_cfg->lane_v4[i] = timing_val[i];
+	return 0;
+}
+
/**
 * dsi_phy_hw_v5_0_dyn_refresh_config() - Program the dynamic-refresh list
 * @phy:       Pointer to DSI PHY hardware object.
 * @cfg:       PHY configuration holding the new timing values.
 * @is_master: True for the master PHY in a dual-DSI pair.
 *
 * Fills the DSI_DYN_REFRESH_PLL_CTRLx shadow registers with (addr, value)
 * pairs the controller replays at the dynamic refresh trigger. The master
 * and slave use disjoint CTRL register ranges; the slave pads its unused
 * slots with harmless duplicate writes since the controller replays the
 * whole list blindly.
 */
void dsi_phy_hw_v5_0_dyn_refresh_config(struct dsi_phy_hw *phy,
					struct dsi_phy_cfg *cfg, bool is_master)
{
	u32 reg;
	bool is_cphy = (cfg->phy_type == DSI_PHY_TYPE_CPHY) ? true : false;

	if (is_master) {
		/* master: timing registers 0-13 followed by lane enables */
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL19,
				DSIPHY_CMN_TIMING_CTRL_0, DSIPHY_CMN_TIMING_CTRL_1,
				cfg->timing.lane_v4[0], cfg->timing.lane_v4[1]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL20,
				DSIPHY_CMN_TIMING_CTRL_2, DSIPHY_CMN_TIMING_CTRL_3,
				cfg->timing.lane_v4[2], cfg->timing.lane_v4[3]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL21,
				DSIPHY_CMN_TIMING_CTRL_4, DSIPHY_CMN_TIMING_CTRL_5,
				cfg->timing.lane_v4[4], cfg->timing.lane_v4[5]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL22,
				DSIPHY_CMN_TIMING_CTRL_6, DSIPHY_CMN_TIMING_CTRL_7,
				cfg->timing.lane_v4[6], cfg->timing.lane_v4[7]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL23,
				DSIPHY_CMN_TIMING_CTRL_8, DSIPHY_CMN_TIMING_CTRL_9,
				cfg->timing.lane_v4[8], cfg->timing.lane_v4[9]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL24,
				DSIPHY_CMN_TIMING_CTRL_10, DSIPHY_CMN_TIMING_CTRL_11,
				cfg->timing.lane_v4[10], cfg->timing.lane_v4[11]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL25,
				DSIPHY_CMN_TIMING_CTRL_12, DSIPHY_CMN_TIMING_CTRL_13,
				cfg->timing.lane_v4[12], cfg->timing.lane_v4[13]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL26,
				DSIPHY_CMN_CTRL_0, DSIPHY_CMN_LANE_CTRL0, 0x7f,
				is_cphy ? 0x17 : 0x1f);

	} else {
		/* slave: release PLL reset/resync FIFO first, then timings */
		reg = DSI_R32(phy, DSIPHY_CMN_CLK_CFG1);
		reg &= ~BIT(5);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL0,
				DSIPHY_CMN_CLK_CFG1, DSIPHY_CMN_PLL_CNTRL, reg, 0x0);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL1,
				DSIPHY_CMN_RBUF_CTRL, DSIPHY_CMN_TIMING_CTRL_0, 0x0,
				cfg->timing.lane_v4[0]);

		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL2,
				DSIPHY_CMN_TIMING_CTRL_1, DSIPHY_CMN_TIMING_CTRL_2,
				cfg->timing.lane_v4[1], cfg->timing.lane_v4[2]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL3,
				DSIPHY_CMN_TIMING_CTRL_3, DSIPHY_CMN_TIMING_CTRL_4,
				cfg->timing.lane_v4[3], cfg->timing.lane_v4[4]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL4,
				DSIPHY_CMN_TIMING_CTRL_5, DSIPHY_CMN_TIMING_CTRL_6,
				cfg->timing.lane_v4[5], cfg->timing.lane_v4[6]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL5,
				DSIPHY_CMN_TIMING_CTRL_7, DSIPHY_CMN_TIMING_CTRL_8,
				cfg->timing.lane_v4[7], cfg->timing.lane_v4[8]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL6,
				DSIPHY_CMN_TIMING_CTRL_9, DSIPHY_CMN_TIMING_CTRL_10,
				cfg->timing.lane_v4[9], cfg->timing.lane_v4[10]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL7,
				DSIPHY_CMN_TIMING_CTRL_11, DSIPHY_CMN_TIMING_CTRL_12,
				cfg->timing.lane_v4[11], cfg->timing.lane_v4[12]);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL8,
				DSIPHY_CMN_TIMING_CTRL_13, DSIPHY_CMN_CTRL_0,
				cfg->timing.lane_v4[13], 0x7f);
		DSI_DYN_REF_REG_W(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_CTRL9,
				DSIPHY_CMN_LANE_CTRL0, DSIPHY_CMN_CTRL_2,
				is_cphy ? 0x17 : 0x1f, 0x40);
		/*
		 * fill with dummy register writes since controller will blindly
		 * send these values to DSI PHY.
		 * NOTE(review): the filler starts at CTRL11, leaving CTRL10
		 * unwritten — confirm this matches the 4nm HPG sequence.
		 */
		reg = DSI_DYN_REFRESH_PLL_CTRL11;
		while (reg <= DSI_DYN_REFRESH_PLL_CTRL29) {
			DSI_DYN_REF_REG_W(phy->dyn_pll_base, reg, DSIPHY_CMN_LANE_CTRL0,
					DSIPHY_CMN_CTRL_0, is_cphy ? 0x17 : 0x1f, 0x7f);
			reg += 0x4;
		}

		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_UPPER_ADDR, 0);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_UPPER_ADDR2, 0);
	}

	wmb(); /* make sure all registers are updated */
}
+
/**
 * dsi_phy_hw_v5_0_dyn_refresh_pipe_delay() - Program dyn-refresh delays
 * @phy:   Pointer to DSI PHY hardware object.
 * @delay: Pipe/PLL delay values to apply; ignored when NULL.
 */
void dsi_phy_hw_v5_0_dyn_refresh_pipe_delay(struct dsi_phy_hw *phy, struct dsi_dyn_clk_delay *delay)
{
	if (!delay)
		return;

	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY, delay->pipe_delay);
	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PIPE_DELAY2, delay->pipe_delay2);
	DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_PLL_DELAY, delay->pll_delay);
}
+
/**
 * dsi_phy_hw_v5_0_dyn_refresh_trigger_sel() - Arm dyn refresh on MDP flush
 * @phy:       Pointer to DSI PHY hardware object.
 * @is_master: Only the master PHY programs the trigger select.
 */
void dsi_phy_hw_v5_0_dyn_refresh_trigger_sel(struct dsi_phy_hw *phy, bool is_master)
{
	u32 reg;

	/*
	 * Dynamic refresh will take effect at next mdp flush event.
	 * This makes sure that any update to frame timings together
	 * with dfps will take effect in one vsync at next mdp flush.
	 */
	if (is_master) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(17);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}
}
+
/**
 * dsi_phy_hw_v5_0_dyn_refresh_helper() - Drive the dyn refresh control bits
 * @phy:    Pointer to DSI PHY hardware object.
 * @offset: Bitmask of DYN_REFRESH_* selector flags; 0 clears everything.
 *
 * Each selector flag sets one control bit in DSI_DYN_REFRESH_CTRL:
 * INTF_SEL -> bit 13, SYNC_MODE -> bit 16, SWI_CTRL -> bit 0,
 * SW_TRIGGER -> bit 8 (fires the refresh). A zero @offset clears all of
 * them as the final step of the dynamic refresh sequence.
 */
void dsi_phy_hw_v5_0_dyn_refresh_helper(struct dsi_phy_hw *phy, u32 offset)
{
	u32 reg;

	/*
	 * if no offset is mentioned then this means we want to clear
	 * the dynamic refresh ctrl register which is the last step
	 * of dynamic refresh sequence.
	 */
	if (!offset) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg &= ~(BIT(0) | BIT(8) | BIT(13) | BIT(16) | BIT(17));
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
		wmb(); /* ensure dynamic fps is cleared */
		return;
	}

	if (offset & BIT(DYN_REFRESH_INTF_SEL)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(13);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SYNC_MODE)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(16);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SWI_CTRL)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(0);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
	}

	if (offset & BIT(DYN_REFRESH_SW_TRIGGER)) {
		reg = DSI_GEN_R32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL);
		reg |= BIT(8);
		DSI_GEN_W32(phy->dyn_pll_base, DSI_DYN_REFRESH_CTRL, reg);
		wmb(); /* ensure dynamic fps is triggered */
	}
}
+
+int dsi_phy_hw_v5_0_cache_phy_timings(struct dsi_phy_per_lane_cfgs *timings,
+				      u32 *dst, u32 size)
+{
+	int i;
+
+	if (!timings || !dst || !size)
+		return -EINVAL;
+
+	if (size != DSI_PHY_TIMING_V4_SIZE) {
+		DSI_ERR("size mis-match\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		dst[i] = timings->lane_v4[i];
+
+	return 0;
+}
+
/**
 * dsi_phy_hw_v5_0_set_continuous_clk() - Toggle continuous HS clock lane
 * @phy:    Pointer to DSI PHY hardware object.
 * @enable: True to keep the clock lane in HS (bits 5/6 of LANE_CTRL1),
 *          false to allow it to drop to LP between transfers.
 */
void dsi_phy_hw_v5_0_set_continuous_clk(struct dsi_phy_hw *phy, bool enable)
{
	u32 reg = 0, sl_lane_ctrl1 = 0;
	bool is_split_link_enabled = dsi_phy_hw_v5_0_is_split_link_enabled(phy);

	reg = DSI_R32(phy, DSIPHY_CMN_LANE_CTRL1);

	if (enable)
		reg |= BIT(5) | BIT(6);
	else
		reg &= ~(BIT(5) | BIT(6));

	DSI_W32(phy, DSIPHY_CMN_LANE_CTRL1, reg);

	/* mirror the setting for the sublink clock lane */
	if (is_split_link_enabled) {
		sl_lane_ctrl1 = DSI_R32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1);
		if (enable)
			sl_lane_ctrl1 |= BIT(2);
		else
			sl_lane_ctrl1 &= ~BIT(2);
		DSI_W32(phy, DSIPHY_CMN_SL_DSI_LANE_CTRL1, sl_lane_ctrl1);
	}

	wmb(); /* make sure request is set */
}
+
/**
 * dsi_phy_hw_v5_0_phy_idle_off() - Park the PHY for idle power collapse
 * @phy: Pointer to DSI PHY hardware object.
 */
void dsi_phy_hw_v5_0_phy_idle_off(struct dsi_phy_hw *phy)
{
	/* enable clamping of PADS */
	DSI_W32(phy, DSIPHY_CMN_CTRL_4, 0x1);
	DSI_W32(phy, DSIPHY_CMN_CTRL_3, 0x0);
	/* ensure clamps take effect before power collapse proceeds */
	wmb();
}

+ 3 - 1
msm/dsi/dsi_phy_timing_calc.c

@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "dsi_phy_timing_calc.h"
 
 static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
-	16, 18, 18, 24, 3, 8, 12 };
+	16, 18, 18, 24, 3, 8, 12, 30 };
 
 static int dsi_phy_cmn_validate_and_set(struct timing_entry *t,
 	char const *t_name)
@@ -994,6 +995,7 @@ int dsi_phy_timing_calc_init(struct dsi_phy_hw *phy,
 	case DSI_PHY_VERSION_4_1:
 	case DSI_PHY_VERSION_4_2:
 	case DSI_PHY_VERSION_4_3:
+	case DSI_PHY_VERSION_4_3_2:
 	case DSI_PHY_VERSION_5_2:
 		ops->get_default_phy_params =
 			dsi_phy_hw_v4_0_get_default_phy_params;

+ 11 - 9
msm/dsi/dsi_pll.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
@@ -23,6 +24,9 @@ static int dsi_pll_clock_register(struct platform_device *pdev,
 	case DSI_PLL_5NM:
 		rc = dsi_pll_clock_register_5nm(pdev, pll_res);
 		break;
+	case DSI_PLL_4NM:
+		rc = dsi_pll_clock_register_4nm(pdev, pll_res);
+		break;
 	default:
 		rc = -EINVAL;
 		break;
@@ -243,7 +247,6 @@ int dsi_pll_init(struct platform_device *pdev, struct dsi_pll_resource **pll)
 	int rc = 0;
 	const char *label;
 	struct dsi_pll_resource *pll_res = NULL;
-	bool in_trusted_vm = false;
 
 	if (!pdev->dev.of_node) {
 		pr_err("Invalid DSI PHY node\n");
@@ -265,12 +268,10 @@ int dsi_pll_init(struct platform_device *pdev, struct dsi_pll_resource **pll)
 
 	DSI_PLL_INFO(pll_res, "DSI pll label = %s\n", label);
 
-	/**
-	  * Currently, Only supports 5nm. Will add
-	  * support for other versions as needed.
-	  */
 
-	if (!strcmp(label, "dsi_pll_5nm"))
+	if (!strcmp(label, "dsi_pll_4nm"))
+		pll_res->pll_revision = DSI_PLL_4NM;
+	else if (!strcmp(label, "dsi_pll_5nm"))
 		pll_res->pll_revision = DSI_PLL_5NM;
 	else
 		return -ENOTSUPP;
@@ -325,9 +326,10 @@ int dsi_pll_init(struct platform_device *pdev, struct dsi_pll_resource **pll)
 	if (dsi_pll_get_ioresources(pdev, &pll_res->gdsc_base, "gdsc_base"))
 		DSI_PLL_DBG(pll_res, "Unable to remap gdsc base resources\n");
 
-	in_trusted_vm = of_property_read_bool(pdev->dev.of_node,
+	pll_res->in_trusted_vm = of_property_read_bool(pdev->dev.of_node,
 						"qcom,dsi-pll-in-trusted-vm");
-	if (in_trusted_vm) {
+
+	if (pll_res->in_trusted_vm) {
 		DSI_PLL_INFO(pll_res,
 			"Bypassing PLL clock register for Trusted VM\n");
 		return rc;
@@ -344,7 +346,7 @@ int dsi_pll_init(struct platform_device *pdev, struct dsi_pll_resource **pll)
 
 void dsi_pll_parse_dfps_data(struct platform_device *pdev, struct dsi_pll_resource *pll_res)
 {
-	if (!(pll_res->index)) {
+	if (!(pll_res->index) && !(pll_res->in_trusted_vm)) {
 		if (dsi_pll_parse_dfps_from_dt(pdev, pll_res))
 			dsi_pll_parse_dfps(pdev, pll_res);
 	}

+ 4 - 0
msm/dsi/dsi_pll.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef __DSI_PLL_H
@@ -59,6 +60,7 @@ struct lpfr_cfg {
 };
 
 enum {
+	DSI_PLL_4NM,
 	DSI_PLL_5NM,
 	DSI_PLL_10NM,
 	DSI_UNKNOWN_PLL,
@@ -171,6 +173,7 @@ struct dsi_pll_resource {
 	 * DSI PHY type DPHY/CPHY
 	 */
 	enum dsi_phy_type type;
+	bool in_trusted_vm;
 };
 
 struct dsi_pll_clk {
@@ -203,6 +206,7 @@ static inline struct dsi_pll_clk *to_pll_clk_hw(struct clk_hw *hw)
 
 int dsi_pll_clock_register_5nm(struct platform_device *pdev,
 				  struct dsi_pll_resource *pll_res);
+int dsi_pll_clock_register_4nm(struct platform_device *pdev, struct dsi_pll_resource *pll_res);
 
 int dsi_pll_init(struct platform_device *pdev,
 				struct dsi_pll_resource **pll_res);

+ 1487 - 0
msm/dsi/dsi_pll_4nm.c

@@ -0,0 +1,1487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include "dsi_pll_4nm.h"
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
+#define MHZ_1900	1900000000UL
+#define MHZ_3000	3000000000UL
+
/*
 * Computed PLL register values for the current VCO rate; filled by
 * dsi_pll_calc_dec_frac()/dsi_pll_calc_ssc() before being written out.
 */
struct dsi_pll_regs {
	u32 pll_prop_gain_rate;
	u32 pll_lockdet_rate;
	u32 decimal_div_start;		/* integer part of feedback divider */
	u32 frac_div_start_low;		/* fractional divider bits [7:0] */
	u32 frac_div_start_mid;		/* fractional divider bits [15:8] */
	u32 frac_div_start_high;	/* fractional divider bits [17:16] */
	u32 pll_clock_inverters;	/* chosen by VCO frequency band */
	u32 ssc_stepsize_low;
	u32 ssc_stepsize_high;
	u32 ssc_div_per_low;
	u32 ssc_div_per_high;
	u32 ssc_adjper_low;
	u32 ssc_adjper_high;
	u32 ssc_control;		/* SSC_CENTER when center-spread */
};
+
/*
 * Static PLL configuration; defaults are assigned in
 * dsi_pll_setup_config(), with the SSC fields optionally overridden
 * from the PLL resource (DT-provided values).
 */
struct dsi_pll_config {
	u32 ref_freq;		/* reference clock in Hz (19.2 MHz default) */
	bool div_override;
	u32 output_div;
	bool ignore_frac;
	bool disable_prescaler;	/* when false, divider uses 2 * ref_freq */
	bool enable_ssc;
	bool ssc_center;	/* center-spread vs down-spread modulation */
	u32 dec_bits;
	u32 frac_bits;		/* fractional divider width (18) */
	u32 lock_timer;
	u32 ssc_freq;		/* SSC modulation frequency, Hz */
	u32 ssc_offset;		/* SSC spread, ppm */
	u32 ssc_adj_per;
	u32 thresh_cycles;
	u32 refclk_cycles;
};
+
/* Per-instance 4nm PLL driver state */
struct dsi_pll_4nm {
	struct dsi_pll_resource *rsc;		/* underlying PLL resource */
	struct dsi_pll_config pll_configuration;
	struct dsi_pll_regs reg_setup;		/* last computed register set */
	/* presumably set when the PHY runs in C-PHY mode — set elsewhere */
	bool cphy_enabled;
};
+
+static inline bool dsi_pll_4nm_is_hw_revision(struct dsi_pll_resource *rsc)
+{
+	return (rsc->pll_revision == DSI_PLL_4NM) ? true : false;
+}
+
+static inline void dsi_pll_set_pll_post_div(struct dsi_pll_resource *pll, u32 pll_post_div)
+{
+	u32 pll_post_div_val = 0;
+
+	if (pll_post_div == 1)
+		pll_post_div_val = 0;
+	if (pll_post_div == 2)
+		pll_post_div_val = 1;
+	if (pll_post_div == 4)
+		pll_post_div_val = 2;
+	if (pll_post_div == 8)
+		pll_post_div_val = 3;
+
+	DSI_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE, pll_post_div_val);
+	if (pll->slave)
+		DSI_PLL_REG_W(pll->slave->pll_base, PLL_PLL_OUTDIV_RATE, pll_post_div_val);
+}
+
+static inline int dsi_pll_get_pll_post_div(struct dsi_pll_resource *pll)
+{
+	u32 reg_val;
+
+	reg_val = DSI_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+
+	return (1 << reg_val);
+}
+
+static inline void dsi_pll_set_phy_post_div(struct dsi_pll_resource *pll, u32 phy_post_div)
+{
+	u32 reg_val = 0;
+
+	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0x0F;
+	reg_val |= phy_post_div;
+	DSI_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+	/* For slave PLL, this divider always should be set to 1 */
+	if (pll->slave) {
+		reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+		reg_val &= ~0x0F;
+		reg_val |= 0x1;
+		DSI_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+	}
+}
+
+
+static inline int dsi_pll_get_phy_post_div(struct dsi_pll_resource *pll)
+{
+	u32 reg_val = 0;
+
+	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+
+	return (reg_val & 0xF);
+}
+
+
/*
 * Select the DSI clock source mux (low two bits of PHY_CMN_CLK_CFG1)
 * on the master PHY, mirroring the same selection to the slave when a
 * dual-DSI pairing is configured.
 */
static inline void dsi_pll_set_dsi_clk(struct dsi_pll_resource *pll, u32 dsi_clk)
{
	u32 reg_val = 0;

	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
	reg_val &= ~0x3;
	reg_val |= dsi_clk;
	DSI_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
	if (pll->slave) {
		reg_val = DSI_PLL_REG_R(pll->slave->phy_base, PHY_CMN_CLK_CFG1);
		reg_val &= ~0x3;
		reg_val |= dsi_clk;
		DSI_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG1, reg_val);
	}
}
+
+static inline int dsi_pll_get_dsi_clk(struct dsi_pll_resource *pll)
+{
+	u32 reg_val;
+
+	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+
+	return (reg_val & 0x3);
+}
+
/*
 * Program the pixel clock divider (upper nibble of PHY_CMN_CLK_CFG0)
 * on the master PHY and mirror it to the slave when paired.
 */
static inline void dsi_pll_set_pclk_div(struct dsi_pll_resource *pll, u32 pclk_div)
{
	u32 reg_val = 0;

	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
	reg_val &= ~0xF0;
	reg_val |= (pclk_div << 4);
	DSI_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
	if (pll->slave) {
		reg_val = DSI_PLL_REG_R(pll->slave->phy_base, PHY_CMN_CLK_CFG0);
		reg_val &= ~0xF0;
		reg_val |= (pclk_div << 4);
		DSI_PLL_REG_W(pll->slave->phy_base, PHY_CMN_CLK_CFG0, reg_val);
	}
}
+
+static inline int dsi_pll_get_pclk_div(struct dsi_pll_resource *pll)
+{
+	u32 reg_val;
+
+	reg_val = DSI_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+
+	return ((reg_val & 0xF0) >> 4);
+}
+
/* Lookup tables indexed by DSI PLL instance (DSI_PLL_0 / DSI_PLL_1) */
static struct dsi_pll_resource *pll_rsc_db[DSI_PLL_MAX];
static struct dsi_pll_4nm plls[DSI_PLL_MAX];
+
+static void dsi_pll_config_slave(struct dsi_pll_resource *rsc)
+{
+	u32 reg;
+	struct dsi_pll_resource *orsc = pll_rsc_db[DSI_PLL_1];
+
+	if (!rsc)
+		return;
+
+	/* Only DSI PLL0 can act as a master */
+	if (rsc->index != DSI_PLL_0)
+		return;
+
+	/* default configuration: source is either internal or ref clock */
+	rsc->slave = NULL;
+
+	if (!orsc) {
+		DSI_PLL_WARN(rsc, "slave PLL unavilable, assuming standalone config\n");
+		return;
+	}
+
+	/* check to see if the source of DSI1 PLL bitclk is set to external */
+	reg = DSI_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+	reg &= (BIT(2) | BIT(3));
+	if (reg == 0x04)
+		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
+
+	DSI_PLL_DBG(rsc, "Slave PLL %s\n",
+			rsc->slave ? "configured" : "absent");
+}
+
/*
 * Populate the PLL configuration with 4nm defaults; SSC enable/center
 * come from the resource, and DT-provided SSC frequency/ppm override
 * the defaults when present.
 */
static void dsi_pll_setup_config(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
{
	struct dsi_pll_config *config = &pll->pll_configuration;

	config->ref_freq = 19200000;
	config->output_div = 1;
	config->dec_bits = 8;
	config->frac_bits = 18;
	config->lock_timer = 64;
	config->ssc_freq = 31500;
	config->ssc_offset = 4800;
	config->ssc_adj_per = 2;
	config->thresh_cycles = 32;
	config->refclk_cycles = 256;

	config->div_override = false;
	config->ignore_frac = false;
	config->disable_prescaler = false;
	config->enable_ssc = rsc->ssc_en;
	config->ssc_center = rsc->ssc_center;

	if (config->enable_ssc) {
		if (rsc->ssc_freq)
			config->ssc_freq = rsc->ssc_freq;
		if (rsc->ssc_ppm)
			config->ssc_offset = rsc->ssc_ppm;
	}
}
+
/*
 * Compute the feedback divider for the requested VCO rate:
 * vco = (dec + frac / 2^frac_bits) * divider, where divider is the
 * reference clock (doubled unless the prescaler is disabled). Also
 * picks the clock-inverter setting from the VCO frequency band.
 * Results land in pll->reg_setup for dsi_pll_ssc_commit()/commit code.
 */
static void dsi_pll_calc_dec_frac(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
{
	struct dsi_pll_config *config = &pll->pll_configuration;
	struct dsi_pll_regs *regs = &pll->reg_setup;
	u64 fref = rsc->vco_ref_clk_rate;
	u64 pll_freq;
	u64 divider;
	u64 dec, dec_multiple;
	u32 frac;
	u64 multiplier;

	pll_freq = rsc->vco_current_rate;

	if (config->disable_prescaler)
		divider = fref;
	else
		divider = fref * 2;

	/* split pll_freq/divider into integer (dec) and 18-bit fraction */
	multiplier = 1 << config->frac_bits;
	dec_multiple = div_u64(pll_freq * multiplier, divider);
	div_u64_rem(dec_multiple, multiplier, &frac);

	dec = div_u64(dec_multiple, multiplier);

	/* clock-inverter selection by VCO band */
	if (pll_freq <= 1300000000ULL)
		regs->pll_clock_inverters = 0xA0;
	else if (pll_freq <= 2500000000ULL)
		regs->pll_clock_inverters = 0x20;
	else if (pll_freq <= 4000000000ULL)
		regs->pll_clock_inverters = 0x00;
	else
		regs->pll_clock_inverters = 0x40;

	regs->pll_lockdet_rate = config->lock_timer;
	regs->decimal_div_start = dec;
	regs->frac_div_start_low = (frac & 0xff);
	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
	regs->frac_div_start_high = (frac & 0x30000) >> 16;
	regs->pll_prop_gain_rate = 10;
}
+
/*
 * Derive the spread-spectrum (SSC) step size and period from the
 * configured modulation frequency and ppm offset, scaled against the
 * divider values previously computed by dsi_pll_calc_dec_frac().
 * No-op when SSC is disabled.
 */
static void dsi_pll_calc_ssc(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
{
	struct dsi_pll_config *config = &pll->pll_configuration;
	struct dsi_pll_regs *regs = &pll->reg_setup;
	u32 ssc_per;
	u32 ssc_mod;
	u64 ssc_step_size;
	u64 frac;

	if (!config->enable_ssc) {
		DSI_PLL_DBG(rsc, "SSC not enabled\n");
		return;
	}

	/* period in half ref cycles, rounded down to a multiple of adj_per+1 */
	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
	ssc_per -= ssc_mod;

	/* step = (dec.frac as fixed point) * offset_ppm * (adj_per+1) / per */
	frac = regs->frac_div_start_low | (regs->frac_div_start_mid << 8) |
			(regs->frac_div_start_high << 16);
	ssc_step_size = regs->decimal_div_start;
	ssc_step_size *= (1 << config->frac_bits);
	ssc_step_size += frac;
	ssc_step_size *= config->ssc_offset;
	ssc_step_size *= (config->ssc_adj_per + 1);
	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);

	regs->ssc_div_per_low = ssc_per & 0xFF;
	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;

	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;

	DSI_PLL_DBG(rsc, "SCC: Dec:%d, frac:%llu, frac_bits:%d\n", regs->decimal_div_start, frac,
			config->frac_bits);
	DSI_PLL_DBG(rsc, "SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n", ssc_per,
			(u32)ssc_step_size, config->ssc_adj_per);
}
+
/*
 * Write the SSC values computed by dsi_pll_calc_ssc() to hardware and
 * turn the modulator on. No-op when SSC is disabled in the config.
 */
static void dsi_pll_ssc_commit(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
{
	void __iomem *pll_base = rsc->pll_base;
	struct dsi_pll_regs *regs = &pll->reg_setup;

	if (pll->pll_configuration.enable_ssc) {
		DSI_PLL_DBG(rsc, "SSC is enabled\n");
		DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1, regs->ssc_stepsize_low);
		DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1, regs->ssc_stepsize_high);
		DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1, regs->ssc_div_per_low);
		DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1, regs->ssc_div_per_high);
		DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1, regs->ssc_adjper_low);
		DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1, regs->ssc_adjper_high);
		DSI_PLL_REG_W(pll_base, PLL_SSC_CONTROL, SSC_EN | regs->ssc_control);
	}
}
+
/*
 * Program the frequency-independent PLL analog and loop-filter settings.
 * Only the first two writes depend on the VCO band; the rest is a fixed
 * sequence whose order follows the hardware programming guide. The
 * performance-optimize setting is mirrored to the slave PLL when paired.
 */
static void dsi_pll_config_hzindep_reg(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
{
	void __iomem *pll_base = rsc->pll_base;
	u64 vco_rate = rsc->vco_current_rate;

	if (vco_rate < 3100000000ULL)
		DSI_PLL_REG_W(pll_base,
				PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
	else
		DSI_PLL_REG_W(pll_base,
				PLL_ANALOG_CONTROLS_FIVE_1, 0x03);

	if (vco_rate < 1557000000ULL)
		DSI_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x08);
	else
		DSI_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x01);


	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE, 0x01);
	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
	DSI_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
	DSI_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
	DSI_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
	DSI_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
	DSI_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
	DSI_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
	DSI_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
	DSI_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
	DSI_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0xc0);
	/* double writes below (IFILT/PFILT/INT_GAIN) are intentional per HPG */
	DSI_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
	DSI_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
	DSI_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
	DSI_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
	DSI_PLL_REG_W(pll_base, PLL_PFILT, 0x2f);
	DSI_PLL_REG_W(pll_base, PLL_IFILT, 0x2a);
	DSI_PLL_REG_W(pll_base, PLL_IFILT, 0x3F);
	DSI_PLL_REG_W(pll_base, PLL_PERF_OPTIMIZE, 0x22);
	if (rsc->slave)
		DSI_PLL_REG_W(rsc->slave->pll_base, PLL_PERF_OPTIMIZE, 0x22);
}
+
+static void dsi_pll_init_val(struct dsi_pll_resource *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x0000003F);
+	DSI_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x00000080);
+	DSI_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_CMODE, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_PSM_CTRL, 0x00000020);
+	DSI_PLL_REG_W(pll_base, PLL_RSM_CTRL, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_TUNE_MAP, 0x00000002);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_CNTRL, 0x0000001C);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x00000002);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x00000020);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0x000000FF);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x0000000A);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x00000025);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0x000000BA);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x0000004F);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0000000A);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0000000C);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DETECT_THRESH, 0x00000020);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_LOW, 0x000000FF);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_HIGH, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_LOW, 0x00000046);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_GAIN, 0x00000054);
+	DSI_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_LOCKDET, 0x00000040);
+	DSI_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x00000004);
+	DSI_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x00000008);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x00000008);
+	DSI_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x00000003);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x00000040);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_2, 0x00000040);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0000000C);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_2, 0x0000000A);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0x000000C0);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x00000054);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_2, 0x00000054);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x0000004C);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_2, 0x0000004C);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_FASTLOCK_EN_BAND, 0x00000003);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MID, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x00000080);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x00000006);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x00000019);
+	DSI_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x00000040);
+	DSI_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x00000020);
+	DSI_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_COMMON_STATUS_ONE, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_COMMON_STATUS_TWO, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FD_OUT_LOW, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FD_OUT_HIGH, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_STATUS_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_MISC_CONFIG, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_CONFIG, 0x00000002);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_FREQ_ACQ_TIME, 0x00000011);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_CODE0, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_CODE1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_GAIN0, 0x00000080);
+	DSI_PLL_REG_W(pll_base, PLL_FLL_GAIN1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_SW_RESET, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_FAST_PWRUP, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_LOCKTIME0, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_LOCKTIME1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_DEBUG_BUS_SEL, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_DEBUG_BUS0, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_DEBUG_BUS1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_DEBUG_BUS2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_DEBUG_BUS3, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_ANALOG_FLL_CONTROL_OVERRIDES, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_CONFIG, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE0_STATUS, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE1_STATUS, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_RESET_SM_STATUS, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_TDC_OFFSET, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_PS3_PWRDOWN_CONTROLS, 0x0000001D);
+	DSI_PLL_REG_W(pll_base, PLL_PS4_PWRDOWN_CONTROLS, 0x0000001C);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_RST_CONTROLS, 0x000000FF);
+	DSI_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x00000022);
+	DSI_PLL_REG_W(pll_base, PLL_PSM_CLK_CONTROLS, 0x00000009);
+	DSI_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_VCO_CONFIG_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1, 0x00000040);
+	DSI_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_2, 0x00000000);
+	DSI_PLL_REG_W(pll_base, PLL_CMODE_1, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_CMODE_2, 0x00000010);
+	DSI_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_2, 0x00000003);
+
+}
+
+static void dsi_pll_detect_phy_mode(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
+{
+	u32 reg_val;
+
+	reg_val = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_GLBL_CTRL);
+	pll->cphy_enabled = (reg_val & BIT(6)) ? true : false;
+}
+
+static void dsi_pll_commit(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	DSI_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+	DSI_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+	DSI_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+	DSI_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+	DSI_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
+	DSI_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+	DSI_PLL_REG_W(pll_base, PLL_CMODE_1, pll->cphy_enabled ? 0x00 : 0x10);
+	DSI_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_4nm_lock_status(struct dsi_pll_resource *pll)
+{
+	int rc;
+	u32 status;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = DSI_READ_POLL_TIMEOUT_ATOMIC_GEN(pll->pll_base, pll->index, PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		DSI_PLL_ERR(pll, "lock failed, status=0x%08x\n", status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_resource *rsc)
+{
+	u32 data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	DSI_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_resource *rsc)
+{
+	u32 data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+	DSI_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_resource *rsc)
+{
+	u32 data;
+
+	data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_resource *rsc)
+{
+	u32 data;
+
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_3, 0x04);
+
+	data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+
+	/* Turn on clk_en_sel bit prior to resync toggle fifo */
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5) | BIT(4)));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_resource *rsc)
+{
+	/*
+	 * Reset the PHY digital domain. This would be needed when
+	 * coming out of a CX or analog rail power collapse while
+	 * ensuring that the pads maintain LP00 or LP11 state
+	 */
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+	wmb(); /* Ensure that the reset is asserted */
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+	wmb(); /* Ensure that the reset is deasserted */
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_resource *rsc)
+{
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(rsc);
+}
+
+static void dsi_pll_unprepare_stub(struct clk_hw *hw)
+{
+	return;
+}
+
+static int dsi_pll_prepare_stub(struct clk_hw *hw)
+{
+	return 0;
+}
+
+static int dsi_pll_set_rate_stub(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
+{
+	return 0;
+}
+
+static long dsi_pll_byteclk_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *parent_rate)
+{
+	struct dsi_pll_clk *pll = to_pll_clk_hw(hw);
+	struct dsi_pll_resource *pll_res = pll->priv;
+
+	return pll_res->byteclk_rate;
+}
+
+static long dsi_pll_pclk_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *parent_rate)
+{
+	struct dsi_pll_clk *pll = to_pll_clk_hw(hw);
+	struct dsi_pll_resource *pll_res = pll->priv;
+
+	return pll_res->pclk_rate;
+}
+
+static unsigned long dsi_pll_vco_recalc_rate(struct dsi_pll_resource *pll)
+{
+	u64 ref_clk;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u32 pll_post_div;
+	u64 pll_freq, tmp64;
+	u64 vco_rate;
+	struct dsi_pll_4nm *pll_4nm;
+	struct dsi_pll_config *config;
+
+	ref_clk = pll->vco_ref_clk_rate;
+	pll_4nm = pll->priv;
+	if (!pll_4nm) {
+		DSI_PLL_ERR(pll, "pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	config = &pll_4nm->pll_configuration;
+
+	dec = DSI_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+	dec &= 0xFF;
+
+	frac = DSI_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((DSI_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) & 0xFF) << 8);
+	frac |= ((DSI_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) & 0x3) << 16);
+
+	multiplier = 1 << config->frac_bits;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	pll_post_div = dsi_pll_get_pll_post_div(pll);
+
+	vco_rate = div_u64(pll_freq, pll_post_div);
+
+	return vco_rate;
+}
+
+static unsigned long dsi_pll_byteclk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct dsi_pll_clk *byte_pll = to_pll_clk_hw(hw);
+	struct dsi_pll_resource *pll = NULL;
+	u64 vco_rate = 0;
+	u64 byte_rate = 0;
+	u32 phy_post_div;
+
+	if (!byte_pll->priv) {
+		DSI_PLL_INFO(pll, "pll priv is null\n");
+		return 0;
+	}
+
+	pll = byte_pll->priv;
+
+	/*
+	 * In the case when byteclk rate is set, the recalculation function
+	 * should  return the current rate. Recalc rate is also called during
+	 * clock registration, during which the function should reverse
+	 * calculate clock rates that were set as part of UEFI.
+	 */
+	if (pll->byteclk_rate != 0) {
+		DSI_PLL_DBG(pll, "returning byte clk rate = %lld %lld\n", pll->byteclk_rate,
+				parent_rate);
+		return  pll->byteclk_rate;
+	}
+
+	vco_rate = dsi_pll_vco_recalc_rate(pll);
+
+	phy_post_div = dsi_pll_get_phy_post_div(pll);
+	byte_rate = div_u64(vco_rate, phy_post_div);
+
+	if (pll->type == DSI_PHY_TYPE_DPHY)
+		byte_rate = div_u64(byte_rate, 8);
+	else
+		byte_rate = div_u64(byte_rate, 7);
+
+	return byte_rate;
+}
+
+static unsigned long dsi_pll_pclk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct dsi_pll_clk *pix_pll = to_pll_clk_hw(hw);
+	struct dsi_pll_resource *pll = NULL;
+	u64 vco_rate = 0;
+	u64 pclk_rate = 0;
+	u32 phy_post_div, pclk_div;
+
+	if (!pix_pll->priv) {
+		DSI_PLL_INFO(pll, "pll priv is null\n");
+		return 0;
+	}
+
+	pll = pix_pll->priv;
+
+	/*
+	 * In the case when pclk rate is set, the recalculation function
+	 * should  return the current rate. Recalc rate is also called during
+	 * clock registration, during which the function should reverse
+	 * calculate the clock rates that were set as part of UEFI.
+	 */
+	if (pll->pclk_rate != 0) {
+		DSI_PLL_DBG(pll, "returning pclk rate = %lld %lld\n", pll->pclk_rate, parent_rate);
+		return pll->pclk_rate;
+	}
+
+	vco_rate = dsi_pll_vco_recalc_rate(pll);
+
+	if (pll->type == DSI_PHY_TYPE_DPHY) {
+		phy_post_div = dsi_pll_get_phy_post_div(pll);
+		pclk_rate = div_u64(vco_rate, phy_post_div);
+		pclk_rate = div_u64(pclk_rate, 2);
+		pclk_div = dsi_pll_get_pclk_div(pll);
+		pclk_rate = div_u64(pclk_rate, pclk_div);
+	} else {
+		pclk_rate = vco_rate * 2;
+		pclk_rate = div_u64(pclk_rate, 7);
+		pclk_div = dsi_pll_get_pclk_div(pll);
+		pclk_rate = div_u64(pclk_rate, pclk_div);
+	}
+
+	return pclk_rate;
+}
+
+static const struct clk_ops pll_byteclk_ops = {
+	.recalc_rate = dsi_pll_byteclk_recalc_rate,
+	.set_rate = dsi_pll_set_rate_stub,
+	.round_rate = dsi_pll_byteclk_round_rate,
+	.prepare = dsi_pll_prepare_stub,
+	.unprepare = dsi_pll_unprepare_stub,
+};
+
+static const struct clk_ops pll_pclk_ops = {
+	.recalc_rate = dsi_pll_pclk_recalc_rate,
+	.set_rate = dsi_pll_set_rate_stub,
+	.round_rate = dsi_pll_pclk_round_rate,
+	.prepare = dsi_pll_prepare_stub,
+	.unprepare = dsi_pll_unprepare_stub,
+};
+
+/*
+ * Clock tree for generating DSI byte and pclk.
+ *
+ *
+ *  +-------------------------------+		+----------------------------+
+ *  |    dsi_phy_pll_out_byteclk    |		|    dsi_phy_pll_out_dsiclk  |
+ *  +---------------+---------------+		+--------------+-------------+
+ *                  |                                          |
+ *                  |                                          |
+ *                  v                                          v
+ *            dsi_byte_clk                                  dsi_pclk
+ *
+ *
+ */
+
+static struct dsi_pll_clk dsi0_phy_pll_out_byteclk = {
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_byteclk",
+			.ops = &pll_byteclk_ops,
+	},
+};
+
+static struct dsi_pll_clk dsi1_phy_pll_out_byteclk = {
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_byteclk",
+			.ops = &pll_byteclk_ops,
+	},
+};
+
+static struct dsi_pll_clk dsi0_phy_pll_out_dsiclk = {
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_dsiclk",
+			.ops = &pll_pclk_ops,
+	},
+};
+
+static struct dsi_pll_clk dsi1_phy_pll_out_dsiclk = {
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_dsiclk",
+			.ops = &pll_pclk_ops,
+	},
+};
+
+int dsi_pll_clock_register_4nm(struct platform_device *pdev, struct dsi_pll_resource *pll_res)
+{
+	int rc = 0, ndx;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = 4;
+
+	if (!pdev || !pdev->dev.of_node || !pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		DSI_PLL_ERR(pll_res, "Invalid params\n");
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+
+	if (ndx >= DSI_PLL_MAX) {
+		DSI_PLL_ERR(pll_res, "not supported\n");
+		return -EINVAL;
+	}
+
+	pll_rsc_db[ndx] = pll_res;
+	plls[ndx].rsc = pll_res;
+	pll_res->priv = &plls[ndx];
+	pll_res->vco_delay = VCO_DELAY_USEC;
+	pll_res->vco_min_rate = 600000000;
+	pll_res->vco_ref_clk_rate = 19200000UL;
+
+	dsi_pll_setup_config(pll_res->priv, pll_res);
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks * sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	/* Establish client data */
+	if (ndx == 0) {
+		dsi0_phy_pll_out_byteclk.priv = pll_res;
+		dsi0_phy_pll_out_dsiclk.priv = pll_res;
+
+		clk = devm_clk_register(&pdev->dev, &dsi0_phy_pll_out_byteclk.hw);
+		if (IS_ERR(clk)) {
+			DSI_PLL_ERR(pll_res, "clk registration failed for DSI clock\n");
+			rc = -EINVAL;
+			goto clk_register_fail;
+		}
+		clk_data->clks[0] = clk;
+
+		clk = devm_clk_register(&pdev->dev, &dsi0_phy_pll_out_dsiclk.hw);
+		if (IS_ERR(clk)) {
+			DSI_PLL_ERR(pll_res, "clk registration failed for DSI clock\n");
+			rc = -EINVAL;
+			goto clk_register_fail;
+		}
+		clk_data->clks[1] = clk;
+
+
+		rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get, clk_data);
+	} else {
+		dsi1_phy_pll_out_byteclk.priv = pll_res;
+		dsi1_phy_pll_out_dsiclk.priv = pll_res;
+
+		clk = devm_clk_register(&pdev->dev, &dsi1_phy_pll_out_byteclk.hw);
+		if (IS_ERR(clk)) {
+			DSI_PLL_ERR(pll_res, "clk registration failed for DSI clock\n");
+			rc = -EINVAL;
+			goto clk_register_fail;
+		}
+		clk_data->clks[2] = clk;
+
+		clk = devm_clk_register(&pdev->dev, &dsi1_phy_pll_out_dsiclk.hw);
+		if (IS_ERR(clk)) {
+			DSI_PLL_ERR(pll_res, "clk registration failed for DSI clock\n");
+			rc = -EINVAL;
+			goto clk_register_fail;
+		}
+		clk_data->clks[3] = clk;
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		DSI_PLL_INFO(pll_res, "Registered clocks successfully\n");
+
+		return rc;
+	}
+clk_register_fail:
+	return rc;
+}
+
+static int dsi_pll_4nm_set_byteclk_div(struct dsi_pll_resource *pll, bool commit)
+{
+
+	int i = 0;
+	int table_size;
+	u32 pll_post_div = 0, phy_post_div = 0;
+	struct dsi_pll_div_table *table;
+	u64 bitclk_rate;
+	u64 const phy_rate_split = 1500000000UL;
+
+	if (pll->type == DSI_PHY_TYPE_DPHY) {
+		bitclk_rate = pll->byteclk_rate * 8;
+
+		if (bitclk_rate <= phy_rate_split) {
+			table = pll_4nm_dphy_lb;
+			table_size = ARRAY_SIZE(pll_4nm_dphy_lb);
+		} else {
+			table = pll_4nm_dphy_hb;
+			table_size = ARRAY_SIZE(pll_4nm_dphy_hb);
+		}
+	} else {
+		bitclk_rate = pll->byteclk_rate * 7;
+
+		if (bitclk_rate <= phy_rate_split) {
+			table = pll_4nm_cphy_lb;
+			table_size = ARRAY_SIZE(pll_4nm_cphy_lb);
+		} else {
+			table = pll_4nm_cphy_hb;
+			table_size = ARRAY_SIZE(pll_4nm_cphy_hb);
+		}
+	}
+
+	for (i = 0; i < table_size; i++) {
+		if ((table[i].min_hz <= bitclk_rate) && (bitclk_rate <= table[i].max_hz)) {
+			pll_post_div = table[i].pll_div;
+			phy_post_div = table[i].phy_div;
+			break;
+		}
+	}
+
+	DSI_PLL_DBG(pll, "bit clk rate: %llu, pll_post_div: %d, phy_post_div: %d\n",
+			bitclk_rate, pll_post_div, phy_post_div);
+
+	if (commit) {
+		dsi_pll_set_pll_post_div(pll, pll_post_div);
+		dsi_pll_set_phy_post_div(pll, phy_post_div);
+	}
+
+	pll->vco_rate = bitclk_rate * pll_post_div * phy_post_div;
+
+	return 0;
+}
+
+static int dsi_pll_calc_dphy_pclk_div(struct dsi_pll_resource *pll)
+{
+	u32 m_val, n_val; /* M and N values of MND trio */
+	u32 pclk_div;
+
+	if (pll->bpp == 30 && pll->lanes == 4) {
+		/* RGB101010 */
+		m_val = 2;
+		n_val = 3;
+	} else if (pll->bpp == 18 && pll->lanes == 2) {
+		/* RGB666_packed */
+		m_val = 2;
+		n_val = 9;
+	} else if (pll->bpp == 18 && pll->lanes == 4) {
+		/* RGB666_packed */
+		m_val = 4;
+		n_val = 9;
+	} else if (pll->bpp == 16 && pll->lanes == 3) {
+		/* RGB565 */
+		m_val = 3;
+		n_val = 8;
+	} else {
+		m_val = 1;
+		n_val = 1;
+	}
+
+	/* Calculating pclk_div assuming dsiclk_sel to be 1 */
+	pclk_div = pll->bpp;
+	pclk_div = mult_frac(pclk_div, m_val, n_val);
+	do_div(pclk_div, 2);
+	do_div(pclk_div, pll->lanes);
+
+	DSI_PLL_DBG(pll, "bpp: %d, lanes: %d, m_val: %u, n_val: %u, pclk_div: %u\n",
+                          pll->bpp, pll->lanes, m_val, n_val, pclk_div);
+
+	return pclk_div;
+}
+
+static int dsi_pll_calc_cphy_pclk_div(struct dsi_pll_resource *pll)
+{
+	u32 m_val, n_val; /* M and N values of MND trio */
+	u32 pclk_div;
+	u32 phy_post_div = dsi_pll_get_phy_post_div(pll);
+
+	if (pll->bpp == 24 && pll->lanes == 2) {
+		/*
+		 * RGB888 or DSC is enabled
+		 * Skipping DSC enabled check
+		 */
+		m_val = 2;
+		n_val = 3;
+	} else if (pll->bpp == 30) {
+		/* RGB101010 */
+		if (pll->lanes == 1) {
+			m_val = 4;
+			n_val = 15;
+		} else {
+			m_val = 16;
+			n_val = 35;
+		}
+	} else if (pll->bpp == 18) {
+		/* RGB666_packed */
+		if (pll->lanes == 1) {
+			m_val = 8;
+			n_val = 63;
+		} else if (pll->lanes == 2) {
+			m_val = 16;
+			n_val = 63;
+		} else if (pll->lanes == 3) {
+			m_val = 8;
+			n_val = 21;
+		} else {
+			m_val = 1;
+			n_val = 1;
+		}
+	} else if (pll->bpp == 16 && pll->lanes == 3) {
+		/* RGB565 */
+		m_val = 3;
+		n_val = 7;
+	} else {
+		m_val = 1;
+		n_val = 1;
+	}
+
+	/* Calculating pclk_div assuming dsiclk_sel to be 3 */
+	pclk_div =  pll->bpp * phy_post_div;
+	pclk_div = mult_frac(pclk_div, m_val, n_val);
+	do_div(pclk_div, 8);
+	do_div(pclk_div, pll->lanes);
+
+	DSI_PLL_DBG(pll, "bpp: %d, lanes: %d, m_val: %u, n_val: %u, phy_post_div: %u pclk_div: %u\n",
+                          pll->bpp, pll->lanes, m_val, n_val, phy_post_div, pclk_div);
+
+	return pclk_div;
+}
+
+static int dsi_pll_4nm_set_pclk_div(struct dsi_pll_resource *pll, bool commit)
+{
+
+	int dsi_clk = 0, pclk_div = 0;
+	u64 pclk_src_rate;
+	u32 pll_post_div;
+	u32 phy_post_div;
+
+	pll_post_div = dsi_pll_get_pll_post_div(pll);
+	pclk_src_rate = div_u64(pll->vco_rate, pll_post_div);
+	if (pll->type == DSI_PHY_TYPE_DPHY) {
+		dsi_clk = 0x1;
+		phy_post_div = dsi_pll_get_phy_post_div(pll);
+		pclk_src_rate = div_u64(pclk_src_rate, phy_post_div);
+		pclk_src_rate = div_u64(pclk_src_rate, 2);
+		pclk_div = dsi_pll_calc_dphy_pclk_div(pll);
+	} else {
+		dsi_clk = 0x3;
+		pclk_src_rate *= 2;
+		pclk_src_rate = div_u64(pclk_src_rate, 7);
+		pclk_div = dsi_pll_calc_cphy_pclk_div(pll);
+	}
+
+	pll->pclk_rate = div_u64(pclk_src_rate, pclk_div);
+
+	DSI_PLL_DBG(pll, "pclk rate: %llu, dsi_clk: %d, pclk_div: %d\n",
+			pll->pclk_rate, dsi_clk, pclk_div);
+
+	if (commit) {
+		dsi_pll_set_dsi_clk(pll, dsi_clk);
+		dsi_pll_set_pclk_div(pll, pclk_div);
+	}
+
+	return 0;
+
+}
+
+static int dsi_pll_4nm_vco_set_rate(struct dsi_pll_resource *pll_res)
+{
+	struct dsi_pll_4nm *pll;
+
+	pll = pll_res->priv;
+	if (!pll) {
+		DSI_PLL_ERR(pll_res, "pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	DSI_PLL_DBG(pll_res, "rate=%lu\n", pll_res->vco_rate);
+
+	pll_res->vco_current_rate = pll_res->vco_rate;
+
+	dsi_pll_detect_phy_mode(pll, pll_res);
+
+	dsi_pll_calc_dec_frac(pll, pll_res);
+
+	dsi_pll_calc_ssc(pll, pll_res);
+
+	dsi_pll_commit(pll, pll_res);
+
+	dsi_pll_config_hzindep_reg(pll, pll_res);
+
+	dsi_pll_ssc_commit(pll, pll_res);
+
+	/* flush, ensure all register writes are done*/
+	wmb();
+
+	return 0;
+}
+
+static int dsi_pll_read_stored_trim_codes(struct dsi_pll_resource *pll_res,
+					  unsigned long vco_clk_rate)
+{
+	int i;
+	bool found = false;
+
+	if (!pll_res->dfps)
+		return -EINVAL;
+
+	for (i = 0; i < pll_res->dfps->vco_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info = &pll_res->dfps->codes_dfps[i];
+
+		DSI_PLL_DBG(pll_res, "valid=%d vco_rate=%d, code %d %d %d\n",
+			codes_info->is_valid, codes_info->clk_rate,
+			codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2,
+			codes_info->pll_codes.pll_codes_3);
+
+		if (vco_clk_rate != codes_info->clk_rate && codes_info->is_valid)
+			continue;
+
+		pll_res->cache_pll_trim_codes[0] = codes_info->pll_codes.pll_codes_1;
+		pll_res->cache_pll_trim_codes[1] = codes_info->pll_codes.pll_codes_2;
+		pll_res->cache_pll_trim_codes[2] = codes_info->pll_codes.pll_codes_3;
+		found = true;
+		break;
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	DSI_PLL_DBG(pll_res, "trim_code_0=0x%x trim_code_1=0x%x trim_code_2=0x%x\n",
+			pll_res->cache_pll_trim_codes[0],
+			pll_res->cache_pll_trim_codes[1],
+			pll_res->cache_pll_trim_codes[2]);
+
+	return 0;
+}
+
+static void dsi_pll_4nm_dynamic_refresh(struct dsi_pll_4nm *pll, struct dsi_pll_resource *rsc)
+{
+	u32 data;
+	u32 offset = DSI_PHY_TO_PLL_OFFSET;
+	u32 upper_addr = 0;
+	u32 upper_addr2 = 0;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	data &= ~BIT(5);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL0, PHY_CMN_CLK_CFG1,
+			PHY_CMN_PLL_CNTRL, data, 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_CLK_CFG1) << 0);
+	upper_addr |= (upper_8_bit(PHY_CMN_PLL_CNTRL) << 1);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+			   PHY_CMN_RBUF_CTRL, (PLL_CORE_INPUT_OVERRIDE + offset), 0, 0x12);
+	upper_addr |= (upper_8_bit(PHY_CMN_RBUF_CTRL) << 2);
+	upper_addr |= (upper_8_bit(PLL_CORE_INPUT_OVERRIDE + offset) << 3);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+			(PLL_DECIMAL_DIV_START_1 + offset), (PLL_FRAC_DIV_START_LOW_1 + offset),
+			reg->decimal_div_start, reg->frac_div_start_low);
+	upper_addr |= (upper_8_bit(PLL_DECIMAL_DIV_START_1 + offset) << 4);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_LOW_1 + offset) << 5);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+			(PLL_FRAC_DIV_START_MID_1 + offset), (PLL_FRAC_DIV_START_HIGH_1 + offset),
+			reg->frac_div_start_mid, reg->frac_div_start_high);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_MID_1 + offset) << 6);
+	upper_addr |= (upper_8_bit(PLL_FRAC_DIV_START_HIGH_1 + offset) << 7);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+			(PLL_SYSTEM_MUXES + offset), (PLL_PLL_LOCKDET_RATE_1 + offset), 0xc0, 0x10);
+	upper_addr |= (upper_8_bit(PLL_SYSTEM_MUXES + offset) << 8);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCKDET_RATE_1 + offset) << 9);
+
+	data = DSI_PLL_REG_R(rsc->pll_base, PLL_PLL_OUTDIV_RATE) & 0x03;
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+			(PLL_PLL_OUTDIV_RATE + offset), (PLL_PLL_LOCK_DELAY + offset), data, 0x06);
+
+	upper_addr |= (upper_8_bit(PLL_PLL_OUTDIV_RATE + offset) << 10);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_DELAY + offset) << 11);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+			(PLL_CMODE_1 + offset), (PLL_CLOCK_INVERTERS_1 + offset),
+			pll->cphy_enabled ? 0x00 : 0x10, reg->pll_clock_inverters);
+	upper_addr |= (upper_8_bit(PLL_CMODE_1 + offset) << 12);
+	upper_addr |= (upper_8_bit(PLL_CLOCK_INVERTERS_1 + offset) << 13);
+
+	data = DSI_PLL_REG_R(rsc->pll_base, PLL_VCO_CONFIG_1);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+			(PLL_ANALOG_CONTROLS_FIVE_1 + offset), (PLL_VCO_CONFIG_1 + offset), 0x01,
+			data);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_FIVE_1 + offset) << 14);
+	upper_addr |= (upper_8_bit(PLL_VCO_CONFIG_1 + offset) << 15);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+			(PLL_ANALOG_CONTROLS_FIVE + offset), (PLL_ANALOG_CONTROLS_TWO + offset),
+			0x01, 0x03);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_FIVE + offset) << 16);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_TWO + offset) << 17);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL9,
+			(PLL_ANALOG_CONTROLS_THREE + offset), (PLL_DSM_DIVIDER + offset),
+			rsc->cache_pll_trim_codes[2], 0x00);
+	upper_addr |= (upper_8_bit(PLL_ANALOG_CONTROLS_THREE + offset) << 18);
+	upper_addr |= (upper_8_bit(PLL_DSM_DIVIDER + offset) << 19);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+			(PLL_FEEDBACK_DIVIDER + offset), (PLL_CALIBRATION_SETTINGS + offset),
+			0x4E, 0x40);
+	upper_addr |= (upper_8_bit(PLL_FEEDBACK_DIVIDER + offset) << 20);
+	upper_addr |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 21);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL11,
+			(PLL_BAND_SEL_CAL_SETTINGS_THREE + offset),
+			(PLL_FREQ_DETECT_SETTINGS_ONE + offset), 0xBA, 0x0C);
+	upper_addr |= (upper_8_bit(PLL_BAND_SEL_CAL_SETTINGS_THREE + offset) << 22);
+	upper_addr |= (upper_8_bit(PLL_FREQ_DETECT_SETTINGS_ONE + offset) << 23);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL12,
+			(PLL_OUTDIV + offset), (PLL_CORE_OVERRIDE + offset), 0, 0);
+	upper_addr |= (upper_8_bit(PLL_OUTDIV + offset) << 24);
+	upper_addr |= (upper_8_bit(PLL_CORE_OVERRIDE + offset) << 25);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL13,
+			(PLL_PLL_DIGITAL_TIMERS_TWO + offset), (PLL_PLL_PROP_GAIN_RATE_1 + offset),
+			0x08, reg->pll_prop_gain_rate);
+	upper_addr |= (upper_8_bit(PLL_PLL_DIGITAL_TIMERS_TWO + offset) << 26);
+	upper_addr |= (upper_8_bit(PLL_PLL_PROP_GAIN_RATE_1 + offset) << 27);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+			(PLL_PLL_BAND_SEL_RATE_1 + offset),
+			(PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset), 0xC0, 0x82);
+	upper_addr |= (upper_8_bit(PLL_PLL_BAND_SEL_RATE_1 + offset) << 28);
+	upper_addr |= (upper_8_bit(PLL_PLL_INT_GAIN_IFILT_BAND_1 + offset) << 29);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+			(PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset),
+			(PLL_PLL_LOCK_OVERRIDE + offset), 0x4c, 0x80);
+	upper_addr |= (upper_8_bit(PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 + offset) << 30);
+	upper_addr |= (upper_8_bit(PLL_PLL_LOCK_OVERRIDE + offset) << 31);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL16,
+			(PLL_PFILT + offset), (PLL_IFILT + offset),
+			0x29, 0x3f);
+	upper_addr2 |= (upper_8_bit(PLL_PFILT + offset) << 0);
+	upper_addr2 |= (upper_8_bit(PLL_IFILT + offset) << 1);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL17,
+			(PLL_SYSTEM_MUXES + offset), (PLL_CALIBRATION_SETTINGS + offset),
+			0xe0, 0x44);
+	upper_addr2 |= (upper_8_bit(PLL_BAND_SEL_CAL + offset) << 2);
+	upper_addr2 |= (upper_8_bit(PLL_CALIBRATION_SETTINGS + offset) << 3);
+
+	data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL18,
+			PHY_CMN_CTRL_2, PHY_CMN_CLK_CFG0, 0x40, data);
+
+	if (rsc->slave)
+		DSI_DYN_PLL_REG_W(rsc->slave->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL10,
+				PHY_CMN_CLK_CFG0, PHY_CMN_CTRL_0, data, 0x7f);
+
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+			PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+			PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+			PHY_CMN_PLL_CNTRL, PHY_CMN_PLL_CNTRL, 0x01, 0x01);
+
+	data = DSI_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+			PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+	DSI_DYN_PLL_REG_W(rsc->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+			PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+
+	if (rsc->slave) {
+		data = DSI_PLL_REG_R(rsc->slave->phy_base, PHY_CMN_CLK_CFG1) | BIT(5);
+
+		DSI_DYN_PLL_REG_W(rsc->slave->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL30,
+				PHY_CMN_CLK_CFG1, PHY_CMN_RBUF_CTRL, data, 0x01);
+		DSI_DYN_PLL_REG_W(rsc->slave->dyn_pll_base, DSI_DYNAMIC_REFRESH_PLL_CTRL31,
+				PHY_CMN_CLK_CFG1, PHY_CMN_CLK_CFG1, data, data);
+	}
+
+	DSI_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, upper_addr);
+	DSI_PLL_REG_W(rsc->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, upper_addr2);
+	wmb(); /* commit register writes */
+}
+
+static int dsi_pll_4nm_dynamic_clk_vco_set_rate(struct dsi_pll_resource *rsc)
+{
+	int rc;
+	struct dsi_pll_4nm *pll;
+	u32 rate;
+
+	if (!rsc) {
+		DSI_PLL_ERR(rsc, "pll resource not found\n");
+		return -EINVAL;
+	}
+
+	rate = rsc->vco_rate;
+	pll = rsc->priv;
+	if (!pll) {
+		DSI_PLL_ERR(rsc, "pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = dsi_pll_read_stored_trim_codes(rsc, rate);
+	if (rc) {
+		DSI_PLL_ERR(rsc, "cannot find pll codes rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	DSI_PLL_DBG(rsc, "ndx=%d, rate=%lu\n", rsc->index, rate);
+	rsc->vco_current_rate = rate;
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	/* program dynamic refresh control registers */
+	dsi_pll_4nm_dynamic_refresh(pll, rsc);
+
+	return 0;
+}
+
+static int dsi_pll_4nm_enable(struct dsi_pll_resource *rsc)
+{
+	int rc = 0;
+
+	/* Start PLL */
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_4nm_lock_status(rsc);
+	if (rc) {
+		DSI_PLL_ERR(rsc, "lock failed\n");
+		goto error;
+	}
+
+	/*
+	 * assert power on reset for PHY digital in case the PLL is
+	 * enabled after CX of analog domain power collapse. This needs
+	 * to be done before enabling the global clk.
+	 */
+	dsi_pll_phy_dig_reset(rsc);
+	if (rsc->slave)
+		dsi_pll_phy_dig_reset(rsc->slave);
+
+	dsi_pll_enable_global_clk(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_global_clk(rsc->slave);
+
+	/* flush, ensure all register writes are done*/
+	wmb();
+error:
+	return rc;
+}
+
+static int dsi_pll_4nm_disable(struct dsi_pll_resource *rsc)
+{
+	int rc = 0;
+
+	DSI_PLL_DBG(rsc, "stop PLL\n");
+
+	/*
+	 * To avoid any stray glitches while
+	 * abruptly powering down the PLL
+	 * make sure to gate the clock using
+	 * the clock enable bit before powering
+	 * down the PLL
+	 */
+	dsi_pll_disable_global_clk(rsc);
+	DSI_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(rsc);
+	if (rsc->slave) {
+		dsi_pll_disable_global_clk(rsc->slave);
+		dsi_pll_disable_sub(rsc->slave);
+	}
+	/* flush, ensure all register writes are done*/
+	wmb();
+
+	return rc;
+}
+
+int dsi_pll_4nm_configure(void *pll, bool commit)
+{
+
+	int rc = 0;
+	struct dsi_pll_resource *rsc = (struct dsi_pll_resource *)pll;
+
+	dsi_pll_config_slave(rsc);
+
+	/* PLL power needs to be enabled before accessing PLL registers */
+	dsi_pll_enable_pll_bias(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_pll_bias(rsc->slave);
+
+	dsi_pll_init_val(rsc);
+
+	rc = dsi_pll_4nm_set_byteclk_div(rsc, commit);
+
+	if (commit) {
+		rc = dsi_pll_4nm_set_pclk_div(rsc, commit);
+		rc = dsi_pll_4nm_vco_set_rate(rsc);
+	} else {
+		rc = dsi_pll_4nm_dynamic_clk_vco_set_rate(rsc);
+	}
+
+	return 0;
+}
+
+int dsi_pll_4nm_toggle(void *pll, bool prepare)
+{
+	int rc = 0;
+	struct dsi_pll_resource *pll_res = (struct dsi_pll_resource *)pll;
+
+	if (!pll_res) {
+		DSI_PLL_ERR(pll_res, "dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	if (prepare) {
+		rc = dsi_pll_4nm_enable(pll_res);
+		if (rc)
+			DSI_PLL_ERR(pll_res, "enable failed: %d\n", rc);
+	} else {
+		rc = dsi_pll_4nm_disable(pll_res);
+		if (rc)
+			DSI_PLL_ERR(pll_res, "disable failed: %d\n", rc);
+	}
+
+	return rc;
+}

+ 299 - 0
msm/dsi/dsi_pll_4nm.h

@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dsi_pll.h"
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE			0x0000
+#define PLL_ANALOG_CONTROLS_TWO			0x0004
+#define PLL_INT_LOOP_SETTINGS			0x0008
+#define PLL_INT_LOOP_SETTINGS_TWO		0x000C
+#define PLL_ANALOG_CONTROLS_THREE		0x0010
+#define PLL_ANALOG_CONTROLS_FOUR		0x0014
+#define PLL_ANALOG_CONTROLS_FIVE		0x0018
+#define PLL_INT_LOOP_CONTROLS			0x001C
+#define PLL_DSM_DIVIDER				0x0020
+#define PLL_FEEDBACK_DIVIDER			0x0024
+#define PLL_SYSTEM_MUXES			0x0028
+#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES	0x002C
+#define PLL_CMODE				0x0030
+#define PLL_PSM_CTRL				0x0034
+#define PLL_RSM_CTRL				0x0038
+#define PLL_VCO_TUNE_MAP			0x003C
+#define PLL_PLL_CNTRL				0x0040
+#define PLL_CALIBRATION_SETTINGS		0x0044
+#define PLL_BAND_SEL_CAL_TIMER_LOW		0x0048
+#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x004C
+#define PLL_BAND_SEL_CAL_SETTINGS		0x0050
+#define PLL_BAND_SEL_MIN			0x0054
+#define PLL_BAND_SEL_MAX			0x0058
+#define PLL_BAND_SEL_PFILT			0x005C
+#define PLL_BAND_SEL_IFILT			0x0060
+#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x0064
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x0068
+#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x006C
+#define PLL_BAND_SEL_ICODE_HIGH			0x0070
+#define PLL_BAND_SEL_ICODE_LOW			0x0074
+#define PLL_FREQ_DETECT_SETTINGS_ONE		0x0078
+#define PLL_FREQ_DETECT_THRESH			0x007C
+#define PLL_FREQ_DET_REFCLK_HIGH		0x0080
+#define PLL_FREQ_DET_REFCLK_LOW			0x0084
+#define PLL_FREQ_DET_PLLCLK_HIGH		0x0088
+#define PLL_FREQ_DET_PLLCLK_LOW			0x008C
+#define PLL_PFILT				0x0090
+#define PLL_IFILT				0x0094
+#define PLL_PLL_GAIN				0x0098
+#define PLL_ICODE_LOW				0x009C
+#define PLL_ICODE_HIGH				0x00A0
+#define PLL_LOCKDET				0x00A4
+#define PLL_OUTDIV				0x00A8
+#define PLL_FASTLOCK_CONTROL			0x00AC
+#define PLL_PASS_OUT_OVERRIDE_ONE		0x00B0
+#define PLL_PASS_OUT_OVERRIDE_TWO		0x00B4
+#define PLL_CORE_OVERRIDE			0x00B8
+#define PLL_CORE_INPUT_OVERRIDE			0x00BC
+#define PLL_RATE_CHANGE				0x00C0
+#define PLL_PLL_DIGITAL_TIMERS			0x00C4
+#define PLL_PLL_DIGITAL_TIMERS_TWO		0x00C8
+#define PLL_DECIMAL_DIV_START			0x00CC
+#define PLL_FRAC_DIV_START_LOW			0x00D0
+#define PLL_FRAC_DIV_START_MID			0x00D4
+#define PLL_FRAC_DIV_START_HIGH			0x00D8
+#define PLL_DEC_FRAC_MUXES			0x00DC
+#define PLL_DECIMAL_DIV_START_1			0x00E0
+#define PLL_FRAC_DIV_START_LOW_1		0x00E4
+#define PLL_FRAC_DIV_START_MID_1		0x00E8
+#define PLL_FRAC_DIV_START_HIGH_1		0x00EC
+#define PLL_DECIMAL_DIV_START_2			0x00F0
+#define PLL_FRAC_DIV_START_LOW_2		0x00F4
+#define PLL_FRAC_DIV_START_MID_2		0x00F8
+#define PLL_FRAC_DIV_START_HIGH_2		0x00FC
+#define PLL_MASH_CONTROL			0x0100
+#define PLL_SSC_STEPSIZE_LOW			0x0104
+#define PLL_SSC_STEPSIZE_HIGH			0x0108
+#define PLL_SSC_DIV_PER_LOW			0x010C
+#define PLL_SSC_DIV_PER_HIGH			0x0110
+#define PLL_SSC_ADJPER_LOW			0x0114
+#define PLL_SSC_ADJPER_HIGH			0x0118
+#define PLL_SSC_MUX_CONTROL			0x011C
+#define PLL_SSC_STEPSIZE_LOW_1			0x0120
+#define PLL_SSC_STEPSIZE_HIGH_1			0x0124
+#define PLL_SSC_DIV_PER_LOW_1			0x0128
+#define PLL_SSC_DIV_PER_HIGH_1			0x012C
+#define PLL_SSC_ADJPER_LOW_1			0x0130
+#define PLL_SSC_ADJPER_HIGH_1			0x0134
+#define PLL_SSC_STEPSIZE_LOW_2			0x0138
+#define PLL_SSC_STEPSIZE_HIGH_2			0x013C
+#define PLL_SSC_DIV_PER_LOW_2			0x0140
+#define PLL_SSC_DIV_PER_HIGH_2			0x0144
+#define PLL_SSC_ADJPER_LOW_2			0x0148
+#define PLL_SSC_ADJPER_HIGH_2			0x014C
+#define PLL_SSC_CONTROL				0x0150
+#define PLL_PLL_OUTDIV_RATE			0x0154
+#define PLL_PLL_LOCKDET_RATE_1			0x0158
+#define PLL_PLL_LOCKDET_RATE_2			0x015C
+#define PLL_PLL_PROP_GAIN_RATE_1		0x0160
+#define PLL_PLL_PROP_GAIN_RATE_2		0x0164
+#define PLL_PLL_BAND_SEL_RATE_1			0x0168
+#define PLL_PLL_BAND_SEL_RATE_2			0x016C
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x0170
+#define PLL_PLL_INT_GAIN_IFILT_BAND_2		0x0174
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x0178
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_2	0x017C
+#define PLL_PLL_FASTLOCK_EN_BAND		0x0180
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x0184
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x0188
+#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x018C
+#define PLL_PLL_LOCK_OVERRIDE			0x0190
+#define PLL_PLL_LOCK_DELAY			0x0194
+#define PLL_PLL_LOCK_MIN_DELAY			0x0198
+#define PLL_CLOCK_INVERTERS			0x019C
+#define PLL_SPARE_AND_JPC_OVERRIDES		0x01A0
+#define PLL_BIAS_CONTROL_1			0x01A4
+#define PLL_BIAS_CONTROL_2			0x01A8
+#define PLL_ALOG_OBSV_BUS_CTRL_1		0x01AC
+#define PLL_COMMON_STATUS_ONE			0x01B0
+#define PLL_COMMON_STATUS_TWO			0x01B4
+#define PLL_BAND_SEL_CAL			0x01B8
+#define PLL_ICODE_ACCUM_STATUS_LOW		0x01BC
+#define PLL_ICODE_ACCUM_STATUS_HIGH		0x01C0
+#define PLL_FD_OUT_LOW				0x01C4
+#define PLL_FD_OUT_HIGH				0x01C8
+#define PLL_ALOG_OBSV_BUS_STATUS_1		0x01CC
+#define PLL_PLL_MISC_CONFIG			0x01D0
+#define PLL_FLL_CONFIG				0x01D4
+#define PLL_FLL_FREQ_ACQ_TIME			0x01D8
+#define PLL_FLL_CODE0				0x01DC
+#define PLL_FLL_CODE1				0x01E0
+#define PLL_FLL_GAIN0				0x01E4
+#define PLL_FLL_GAIN1				0x01E8
+#define PLL_SW_RESET				0x01EC
+#define PLL_FAST_PWRUP				0x01F0
+#define PLL_LOCKTIME0				0x01F4
+#define PLL_LOCKTIME1				0x01F8
+#define PLL_DEBUG_BUS_SEL			0x01FC
+#define PLL_DEBUG_BUS0				0x0200
+#define PLL_DEBUG_BUS1				0x0204
+#define PLL_DEBUG_BUS2				0x0208
+#define PLL_DEBUG_BUS3				0x020C
+#define PLL_ANALOG_FLL_CONTROL_OVERRIDES	0x0210
+#define PLL_VCO_CONFIG				0x0214
+#define PLL_VCO_CAL_CODE1_MODE0_STATUS		0x0218
+#define PLL_VCO_CAL_CODE1_MODE1_STATUS		0x021C
+#define PLL_RESET_SM_STATUS			0x0220
+#define PLL_TDC_OFFSET				0x0224
+#define PLL_PS3_PWRDOWN_CONTROLS		0x0228
+#define PLL_PS4_PWRDOWN_CONTROLS		0x022C
+#define PLL_PLL_RST_CONTROLS			0x0230
+#define PLL_GEAR_BAND_SELECT_CONTROLS		0x0234
+#define PLL_PSM_CLK_CONTROLS			0x0238
+#define PLL_SYSTEM_MUXES_2			0x023C
+#define PLL_VCO_CONFIG_1			0x0240
+#define PLL_VCO_CONFIG_2			0x0244
+#define PLL_CLOCK_INVERTERS_1			0x0248
+#define PLL_CLOCK_INVERTERS_2			0x024C
+#define PLL_CMODE_1				0x0250
+#define PLL_CMODE_2				0x0254
+#define PLL_ANALOG_CONTROLS_FIVE_1		0x0258
+#define PLL_ANALOG_CONTROLS_FIVE_2		0x025C
+#define PLL_PERF_OPTIMIZE			0x0260
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0	0x010
+#define PHY_CMN_CLK_CFG1	0x014
+#define PHY_CMN_GLBL_CTRL	0x018
+#define PHY_CMN_RBUF_CTRL	0x01C
+#define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_2		0x02C
+#define PHY_CMN_CTRL_3		0x030
+#define PHY_CMN_PLL_CNTRL	0x03C
+#define PHY_CMN_GLBL_DIGTOP_SPARE4 0x128
+
+/* Bit definition of SSC control registers */
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+#define SSC_FREQ_UPDATE		BIT(2)
+#define SSC_FREQ_UPDATE_MUX	BIT(3)
+#define SSC_UPDATE_SSC		BIT(4)
+#define SSC_UPDATE_SSC_MUX	BIT(5)
+#define SSC_START		BIT(6)
+#define SSC_START_MUX		BIT(7)
+
+/* Dynamic Refresh Control Registers */
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0		(0x014)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1		(0x018)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2		(0x01C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3		(0x020)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4		(0x024)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5		(0x028)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6		(0x02C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7		(0x030)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8		(0x034)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9		(0x038)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10		(0x03C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11		(0x040)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12		(0x044)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13		(0x048)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14		(0x04C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		(0x050)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16		(0x054)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17		(0x058)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18		(0x05C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		(0x060)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		(0x064)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		(0x068)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		(0x06C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		(0x070)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		(0x074)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		(0x078)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		(0x07C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		(0x080)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		(0x084)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		(0x088)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30		(0x08C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31		(0x090)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	(0x094)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	(0x098)
+
+#define DSI_PHY_TO_PLL_OFFSET	(0x500)
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_MAX
+};
+
+struct dsi_pll_div_table pll_4nm_dphy_lb[] = {
+	{27270000, 30000000, 2, 11},
+	{30000000, 33330000, 4, 5},
+	{33330000, 37500000, 2, 9},
+	{37500000, 40000000, 8, 2},
+	{40000000, 42860000, 1, 15},
+	{42860000, 46150000, 2, 7},
+	{46150000, 50000000, 1, 13},
+	{50000000, 54550000, 4, 3},
+	{54550000, 60000000, 1, 11},
+	{60000000, 66670000, 2, 5},
+	{66670000, 75000000, 1, 9},
+	{75000000, 85710000, 8, 1},
+	{85710000, 100000000, 1, 7},
+	{100000000, 120000000, 2, 3},
+	{120000000, 150000000, 1, 5},
+	{150000000, 200000000, 4, 1},
+	{200000000, 300000000, 1, 3},
+	{300000000, 600000000, 2, 1},
+	{600000000, 1500000000, 1, 1}
+};
+
+struct dsi_pll_div_table pll_4nm_dphy_hb[] = {
+	{68180000, 75000000, 2, 11},
+	{75000000, 83330000, 4, 5},
+	{83330000, 93750000, 2, 9},
+	{93750000, 100000000, 8, 2},
+	{100000000, 107140000, 1, 15},
+	{107140000, 115380000, 2, 7},
+	{115380000, 125000000, 1, 13},
+	{125000000, 136360000, 4, 3},
+	{136360000, 150000000, 1, 11},
+	{150000000, 166670000, 2, 5},
+	{166670000, 187500000, 1, 9},
+	{187500000, 214290000, 8, 1},
+	{214290000, 250000000, 1, 7},
+	{250000000, 300000000, 2, 3},
+	{300000000, 375000000, 1, 5},
+	{375000000, 500000000, 4, 1},
+	{500000000, 750000000, 1, 3},
+	{750000000, 1500000000, 2, 1},
+	{1500000000, 5000000000, 1, 1}
+};
+
+struct dsi_pll_div_table pll_4nm_cphy_lb[] = {
+	{30000000, 37500000, 4, 5},
+	{37500000, 50000000, 8, 2},
+	{50000000, 60000000, 4, 3},
+	{60000000, 75000000, 2, 5},
+	{75000000, 100000000, 8, 1},
+	{100000000, 120000000, 2, 3},
+	{120000000, 150000000, 1, 5},
+	{150000000, 200000000, 4, 1},
+	{200000000, 300000000, 1, 3},
+	{300000000, 600000000, 2, 1},
+	{600000000, 1500000000, 1, 1}
+};
+
+struct dsi_pll_div_table pll_4nm_cphy_hb[] = {
+	{75000000, 93750000, 4, 5},
+	{93750000, 125000000, 8, 2},
+	{125000000, 150000000, 4, 3},
+	{150000000, 187500000, 2, 5},
+	{187500000, 250000000, 8, 1},
+	{250000000, 300000000, 2, 3},
+	{300000000, 375000000, 1, 5},
+	{375000000, 500000000, 4, 1},
+	{500000000, 750000000, 1, 3},
+	{750000000, 1500000000, 2, 1},
+	{1500000000, 5000000000, 1, 1}
+};

+ 3 - 1
msm/dsi/dsi_pll_5nm.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -1556,7 +1557,8 @@ int dsi_pll_5nm_configure(void *pll, bool commit)
 	if (rsc->slave)
 		dsi_pll_enable_pll_bias(rsc->slave);
 
-	dsi_pll_init_val(rsc);
+	if (commit)
+		dsi_pll_init_val(rsc);
 
 	rc = dsi_pll_5nm_set_byteclk_div(rsc, commit);
 

+ 1 - 1
msm/dsi/dsi_pwr.c

@@ -443,7 +443,7 @@ int dsi_pwr_panel_regulator_mode_set(struct dsi_regulator_info *regs,
 	}
 
 	if (i >= regs->count) {
-		DSI_ERR("Regulator %s was not found\n", reg_name);
+		DSI_DEBUG("Regulator %s was not found\n", reg_name);
 		return -EINVAL;
 	}
 

+ 10 - 3
msm/msm_atomic.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -87,7 +88,8 @@ static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
 	if (msm_is_mode_seamless_dms(msm_mode) && !enable)
 		return true;
 
-	if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+	if (!crtc_state->mode_changed && crtc_state->connectors_changed &&
+		crtc_state->active) {
 		for_each_old_connector_in_state(state, connector,
 				conn_state, i) {
 			if ((conn_state->crtc == crtc_state->crtc) ||
@@ -256,7 +258,8 @@ msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 		if (!old_crtc_state->active)
 			continue;
 
-		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
+		if (!crtc->state->active_changed &&
+				_msm_seamless_for_crtc(old_state, crtc->state, false))
 			continue;
 
 		funcs = crtc->helper_private;
@@ -317,6 +320,9 @@ msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 		mode = &new_crtc_state->mode;
 		adjusted_mode = &new_crtc_state->adjusted_mode;
 
+		if (!new_crtc_state->active)
+			continue;
+
 		if (!new_crtc_state->mode_changed &&
 				new_crtc_state->connectors_changed) {
 			if (_msm_seamless_for_conn(connector,
@@ -410,7 +416,8 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
 		if (!new_crtc_state->active)
 			continue;
 
-		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
+		if (!crtc->state->active_changed &&
+				_msm_seamless_for_crtc(old_state, crtc->state, true))
 			continue;
 
 		funcs = crtc->helper_private;

+ 3 - 2
msm/msm_cooling_device.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
  */
 
@@ -19,7 +20,7 @@ struct sde_cdev {
 	unsigned int cdev_sf;
 };
 
-#ifdef CONFIG_THERMAL_OF
+#if IS_ENABLED(CONFIG_THERMAL_OF)
 struct sde_cdev *backlight_cdev_register(struct device *dev,
 					struct backlight_device *bd,
 					struct notifier_block *n);
@@ -33,6 +34,6 @@ backlight_cdev_register(struct device *dev,
 }
 static inline void backlight_cdev_unregister(struct sde_cdev *cdev)
 { }
-#endif
+#endif /* CONFIG_THERMAL_OF */
 
 #endif

+ 88 - 52
msm/msm_drv.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -168,19 +169,19 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
 	.atomic_commit_tail = msm_atomic_commit_tail,
 };
 
-#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
+#if IS_ENABLED(CONFIG_DRM_MSM_REGISTER_LOGGING)
 static bool reglog = false;
 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
 module_param(reglog, bool, 0600);
 #else
 #define reglog 0
-#endif
+#endif /* CONFIG_DRM_MSM_REGISTER_LOGGING */
 
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
 static bool fbdev = true;
 MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
 module_param(fbdev, bool, 0600);
-#endif
+#endif /* CONFIG_DRM_FBDEV_EMULATION */
 
 static char *vram = "16m";
 MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
@@ -296,7 +297,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 
 	ptr = devm_ioremap(&pdev->dev, res->start, size);
 	if (!ptr) {
-		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+		DISP_DEV_ERR(&pdev->dev, "failed to ioremap: %s\n", name);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -330,13 +331,13 @@ unsigned long msm_get_phys_addr(struct platform_device *pdev, const char *name)
 	struct resource *res;
 
 	if (!name) {
-		dev_err(&pdev->dev, "invalid block name\n");
+		DISP_DEV_ERR(&pdev->dev, "invalid block name\n");
 		return 0;
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
 	if (!res) {
-		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+		DISP_DEV_ERR(&pdev->dev, "failed to get memory resource: %s\n", name);
 		return 0;
 	}
 
@@ -507,10 +508,10 @@ static int msm_drm_uninit(struct device *dev)
 		priv->registered = false;
 	}
 
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
 	if (fbdev && priv->fbdev)
 		msm_fbdev_free(ddev);
-#endif
+#endif /* CONFIG_DRM_FBDEV_EMULATION */
 	drm_atomic_helper_shutdown(ddev);
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 	msm_irq_uninstall(ddev);
@@ -518,8 +519,10 @@ static int msm_drm_uninit(struct device *dev)
 	drm_irq_uninstall(ddev);
 #endif
 
-	if (kms && kms->funcs)
+	if (kms && kms->funcs) {
 		kms->funcs->destroy(kms);
+		priv->kms = NULL;
+	}
 
 	if (priv->vram.paddr) {
 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
@@ -561,7 +564,7 @@ static int msm_drm_uninit(struct device *dev)
 
 static int get_mdp_ver(struct platform_device *pdev)
 {
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF)
 	static const struct of_device_id match_types[] = { {
 		.compatible = "qcom,mdss_mdp",
 		.data	= (void	*)KMS_MDP5,
@@ -578,7 +581,7 @@ static int get_mdp_ver(struct platform_device *pdev)
 	match = of_match_node(match_types, dev->of_node);
 	if (match)
 		return (int)(unsigned long)match->data;
-#endif
+#endif /* CONFIG_OF */
 	return KMS_MDP4;
 }
 
@@ -648,7 +651,7 @@ static int msm_init_vram(struct drm_device *dev)
 		p = dma_alloc_attrs(dev->dev, size,
 				&priv->vram.paddr, GFP_KERNEL, attrs);
 		if (!p) {
-			dev_err(dev->dev, "failed to allocate VRAM\n");
+			DISP_DEV_ERR(dev->dev, "failed to allocate VRAM\n");
 			priv->vram.paddr = 0;
 			return -ENOMEM;
 		}
@@ -661,7 +664,7 @@ static int msm_init_vram(struct drm_device *dev)
 	return ret;
 }
 
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF)
 static int msm_component_bind_all(struct device *dev,
 				struct drm_device *drm_dev)
 {
@@ -679,14 +682,13 @@ static int msm_component_bind_all(struct device *dev,
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_OF */
 
 static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct drm_device *ddev,
 	struct device *dev)
 {
 	int i, ret = 0;
 
-	kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
 	for (i = 0; i < priv->num_crtcs; i++) {
 		/* initialize display thread */
 		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
@@ -696,10 +698,13 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 			kthread_run(kthread_worker_fn,
 				&priv->disp_thread[i].worker,
 				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+		kthread_init_work(&priv->thread_priority_work,
+				  msm_drm_display_thread_priority_worker);
 		kthread_queue_work(&priv->disp_thread[i].worker, &priv->thread_priority_work);
+		kthread_flush_work(&priv->thread_priority_work);
 
 		if (IS_ERR(priv->disp_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_commit kthread\n");
+			DISP_DEV_ERR(dev, "failed to create crtc_commit kthread\n");
 			priv->disp_thread[i].thread = NULL;
 		}
 
@@ -718,10 +723,13 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 		 * frame_pending counters beyond 2. This can lead to commit
 		 * failure at crtc commit level.
 		 */
+		kthread_init_work(&priv->thread_priority_work,
+				  msm_drm_display_thread_priority_worker);
 		kthread_queue_work(&priv->event_thread[i].worker, &priv->thread_priority_work);
+		kthread_flush_work(&priv->thread_priority_work);
 
 		if (IS_ERR(priv->event_thread[i].thread)) {
-			dev_err(dev, "failed to create crtc_event kthread\n");
+			DISP_DEV_ERR(dev, "failed to create crtc_event kthread\n");
 			priv->event_thread[i].thread = NULL;
 		}
 
@@ -753,10 +761,12 @@ static int msm_drm_display_thread_create(struct msm_drm_private *priv, struct dr
 	kthread_init_worker(&priv->pp_event_worker);
 	priv->pp_event_thread = kthread_run(kthread_worker_fn,
 			&priv->pp_event_worker, "pp_event");
+	kthread_init_work(&priv->thread_priority_work, msm_drm_display_thread_priority_worker);
 	kthread_queue_work(&priv->pp_event_worker, &priv->thread_priority_work);
+	kthread_flush_work(&priv->thread_priority_work);
 
 	if (IS_ERR(priv->pp_event_thread)) {
-		dev_err(dev, "failed to create pp_event kthread\n");
+		DISP_DEV_ERR(dev, "failed to create pp_event kthread\n");
 		ret = PTR_ERR(priv->pp_event_thread);
 		priv->pp_event_thread = NULL;
 		return ret;
@@ -795,7 +805,7 @@ static struct msm_kms *_msm_drm_component_init_helper(
 		 * and (for example) use dmabuf/prime to share buffers with
 		 * imx drm driver on iMX5
 		 */
-		dev_err(dev, "failed to load kms\n");
+		DISP_DEV_ERR(dev, "failed to load kms\n");
 		return kms;
 	}
 	priv->kms = kms;
@@ -811,7 +821,7 @@ static struct msm_kms *_msm_drm_component_init_helper(
 
 	ret = (kms)->funcs->hw_init(kms);
 	if (ret) {
-		dev_err(dev, "kms hw init failed: %d\n", ret);
+		DISP_DEV_ERR(dev, "kms hw init failed: %d\n", ret);
 		return ERR_PTR(ret);
 	}
 
@@ -828,7 +838,7 @@ static int msm_drm_device_init(struct platform_device *pdev,
 
 	ddev = drm_dev_alloc(drv, dev);
 	if (IS_ERR(ddev)) {
-		dev_err(dev, "failed to allocate drm_device\n");
+		DISP_DEV_ERR(dev, "failed to allocate drm_device\n");
 		return PTR_ERR(ddev);
 	}
 
@@ -852,15 +862,15 @@ static int msm_drm_device_init(struct platform_device *pdev,
 
 	ret = sde_dbg_init(&pdev->dev);
 	if (ret) {
-		dev_err(dev, "failed to init sde dbg: %d\n", ret);
+		DISP_DEV_ERR(dev, "failed to init sde dbg: %d\n", ret);
 		goto dbg_init_fail;
 	}
 
 	pm_runtime_enable(dev);
 
-	ret = pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
 	if (ret < 0) {
-		dev_err(dev, "resource enable failed: %d\n", ret);
+		DISP_DEV_ERR(dev, "failed to enable power resource %d\n", ret);
 		goto pm_runtime_error;
 	}
 
@@ -907,8 +917,12 @@ static int msm_drm_component_init(struct device *dev)
 
 	/* Bind all our sub-components: */
 	ret = msm_component_bind_all(dev, ddev);
-	if (ret)
+	if (ret == -EPROBE_DEFER) {
+		destroy_workqueue(priv->wq);
+		return ret;
+	} else if (ret) {
 		goto bind_fail;
+	}
 
 	ret = msm_init_vram(ddev);
 	if (ret)
@@ -919,7 +933,7 @@ static int msm_drm_component_init(struct device *dev)
 
 	kms = _msm_drm_component_init_helper(priv, ddev, dev, pdev);
 	if (IS_ERR_OR_NULL(kms)) {
-		dev_err(dev, "msm_drm_component_init_helper failed\n");
+		DISP_DEV_ERR(dev, "msm_drm_component_init_helper failed\n");
 		goto fail;
 	}
 
@@ -929,13 +943,13 @@ static int msm_drm_component_init(struct device *dev)
 
 	ret = msm_drm_display_thread_create(priv, ddev, dev);
 	if (ret) {
-		dev_err(dev, "msm_drm_display_thread_create failed\n");
+		DISP_DEV_ERR(dev, "msm_drm_display_thread_create failed\n");
 		goto fail;
 	}
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
-		dev_err(dev, "failed to initialize vblank\n");
+		DISP_DEV_ERR(dev, "failed to initialize vblank\n");
 		goto fail;
 	}
 
@@ -943,7 +957,12 @@ static int msm_drm_component_init(struct device *dev)
 		drm_crtc_vblank_reset(crtc);
 
 	if (kms) {
-		pm_runtime_get_sync(dev);
+		ret = pm_runtime_resume_and_get(dev);
+		if (ret < 0) {
+			DISP_DEV_ERR(dev, "failed to enable power resource %d\n", ret);
+			goto fail;
+		}
+
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
 		ret = msm_irq_install(ddev, platform_get_irq(pdev, 0));
 #else
@@ -951,7 +970,7 @@ static int msm_drm_component_init(struct device *dev)
 #endif
 		pm_runtime_put_sync(dev);
 		if (ret < 0) {
-			dev_err(dev, "failed to install IRQ handler\n");
+			DISP_DEV_ERR(dev, "failed to install IRQ handler\n");
 			goto fail;
 		}
 	}
@@ -966,15 +985,15 @@ static int msm_drm_component_init(struct device *dev)
 	if (kms && kms->funcs && kms->funcs->cont_splash_config) {
 		ret = kms->funcs->cont_splash_config(kms, NULL);
 		if (ret) {
-			dev_err(dev, "kms cont_splash config failed.\n");
+			DISP_DEV_ERR(dev, "kms cont_splash config failed.\n");
 			goto fail;
 		}
 	}
 
-#ifdef CONFIG_DRM_FBDEV_EMULATION
+#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
 	if (fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
-#endif
+#endif /* CONFIG_DRM_FBDEV_EMULATION */
 
 	/* create drm client only when fbdev is not supported */
 	if (!priv->fbdev) {
@@ -990,7 +1009,7 @@ static int msm_drm_component_init(struct device *dev)
 
 	ret = sde_dbg_debugfs_register(dev);
 	if (ret) {
-		dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
+		DISP_DEV_ERR(dev, "failed to reg sde dbg debugfs: %d\n", ret);
 		goto fail;
 	}
 
@@ -1098,19 +1117,32 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 static void msm_lastclose(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
+	struct msm_kms *kms;
 	int i, rc;
 
-	if (!kms)
+	if (!priv || !priv->kms)
 		return;
 
+	kms = priv->kms;
+
 	/* check for splash status before triggering cleanup
 	 * if we end up here with splash status ON i.e before first
 	 * commit then ignore the last close call
 	 */
 	if (kms->funcs && kms->funcs->check_for_splash
-		&& kms->funcs->check_for_splash(kms))
-		return;
+		&& kms->funcs->check_for_splash(kms)) {
+		msm_wait_event_timeout(priv->pending_crtcs_event, !priv->pending_crtcs,
+			LASTCLOSE_TIMEOUT_MS, rc);
+		if (!rc)
+			DRM_INFO("wait for crtc mask 0x%x failed, commit anyway...\n",
+				priv->pending_crtcs);
+
+		rc = kms->funcs->trigger_null_flush(kms);
+		if (rc) {
+			DRM_ERROR("null flush commit failure during lastclose\n");
+			return;
+		}
+	}
 
 	/*
 	 * clean up vblank disable immediately as this is the last close.
@@ -1553,8 +1585,15 @@ static int msm_release(struct inode *inode, struct file *filp)
 	 * refcount > 1. This operation is not triggered from upstream
 	 * drm as msm_driver does not support DRIVER_LEGACY feature.
 	 */
-	if (drm_is_current_master(file_priv))
+	if (drm_is_current_master(file_priv)) {
+		msm_wait_event_timeout(priv->pending_crtcs_event, !priv->pending_crtcs,
+			LASTCLOSE_TIMEOUT_MS, ret);
+		if (!ret)
+			DRM_INFO("wait for crtc mask 0x%x failed, commit anyway...\n",
+				priv->pending_crtcs);
+
 		msm_preclose(dev, file_priv);
+	}
 
 	ret = drm_release(inode, filp);
 	filp->private_data = NULL;
@@ -1653,7 +1692,7 @@ int msm_ioctl_power_ctrl(struct drm_device *dev, void *data,
 
 	if (vote_req) {
 		if (power_ctrl->enable)
-			rc = pm_runtime_get_sync(dev->dev);
+			rc = pm_runtime_resume_and_get(dev->dev);
 		else
 			pm_runtime_put_sync(dev->dev);
 
@@ -1793,7 +1832,7 @@ static struct drm_driver msm_driver = {
 	.patchlevel         = MSM_VERSION_PATCHLEVEL,
 };
 
-#ifdef CONFIG_PM_SLEEP
+#if IS_ENABLED(CONFIG_PM_SLEEP)
 static int msm_pm_suspend(struct device *dev)
 {
 	struct drm_device *ddev;
@@ -1843,9 +1882,9 @@ static int msm_pm_resume(struct device *dev)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_PM
+#if IS_ENABLED(CONFIG_PM)
 static int msm_runtime_suspend(struct device *dev)
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
@@ -1876,7 +1915,7 @@ static int msm_runtime_resume(struct device *dev)
 
 	return ret;
 }
-#endif
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops msm_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
@@ -1929,7 +1968,7 @@ static int add_components_mdp(struct device *mdp_dev,
 
 		ret = of_graph_parse_endpoint(ep_node, &ep);
 		if (ret) {
-			dev_err(mdp_dev, "unable to parse port endpoint\n");
+			DISP_DEV_ERR(mdp_dev, "unable to parse port endpoint\n");
 			of_node_put(ep_node);
 			return ret;
 		}
@@ -1996,13 +2035,13 @@ static int add_display_components(struct device *dev,
 	if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 		if (ret) {
-			dev_err(dev, "failed to populate children devices\n");
+			DISP_DEV_ERR(dev, "failed to populate children devices\n");
 			return ret;
 		}
 
 		mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
 		if (!mdp_dev) {
-			dev_err(dev, "failed to find MDSS MDP node\n");
+			DISP_DEV_ERR(dev, "failed to find MDSS MDP node\n");
 			of_platform_depopulate(dev);
 			return -ENODEV;
 		}
@@ -2141,7 +2180,7 @@ static int msm_drm_component_dependency_check(struct device *dev)
 			struct platform_device *pdev =
 					of_find_device_by_node(node);
 			if (!platform_get_drvdata(pdev)) {
-				dev_err(dev,
+				DISP_DEV_ERR(dev,
 					"qcom,sde_rscc not probed yet\n");
 				return -EPROBE_DEFER;
 			} else {
@@ -2269,9 +2308,6 @@ static void __exit msm_drm_unregister(void)
 module_init(msm_drm_register);
 module_exit(msm_drm_unregister);
 
-#if IS_ENABLED(CONFIG_MSM_MMRM)
-MODULE_SOFTDEP("pre: msm-mmrm");
-#endif
 MODULE_AUTHOR("Rob Clark <[email protected]>");
 MODULE_DESCRIPTION("MSM DRM Driver");
 MODULE_LICENSE("GPL");

+ 51 - 4
msm/msm_drv.h

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -84,6 +85,8 @@ struct msm_gem_vma;
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5
 
+#define DISP_DEV_ERR(dev, fmt, ...) dev_err(dev, "[%s:%d] " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
 struct msm_file_private {
 	rwlock_t queuelock;
 	struct list_head submitqueues;
@@ -220,6 +223,7 @@ enum msm_mdp_conn_property {
 	CONNECTOR_PROP_DIMMING_CTRL,
 	CONNECTOR_PROP_DIMMING_MIN_BL,
 	CONNECTOR_PROP_EARLY_FENCE_LINE,
+	CONNECTOR_PROP_DYN_TRANSFER_TIME,
 
 	/* enum/bitmask properties */
 	CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -233,6 +237,7 @@ enum msm_mdp_conn_property {
 	CONNECTOR_PROP_AVR_STEP,
 	CONNECTOR_PROP_CACHE_STATE,
 	CONNECTOR_PROP_DSC_MODE,
+	CONNECTOR_PROP_WB_USAGE_TYPE,
 
 	/* total # of properties */
 	CONNECTOR_PROP_COUNT
@@ -253,6 +258,18 @@ enum msm_display_compression_type {
 	MSM_DISPLAY_COMPRESSION_VDC
 };
 
+/**
+ * enum msm_display_wd_jitter_type - Type of WD jitter used
+ * @MSM_DISPLAY_WD_JITTER_NONE:      No WD timer jitter enabled
+ * @MSM_DISPLAY_WD_INSTANTANEOUS_JITTER:  Instantaneous WD jitter enabled
+ * @MSM_DISPLAY_WD_LTJ_JITTER:       LTJ WD jitter enabled
+ */
+enum msm_display_wd_jitter_type {
+	MSM_DISPLAY_WD_JITTER_NONE = BIT(0),
+	MSM_DISPLAY_WD_INSTANTANEOUS_JITTER = BIT(1),
+	MSM_DISPLAY_WD_LTJ_JITTER = BIT(2),
+};
+
 #define MSM_DISPLAY_COMPRESSION_RATIO_NONE 1
 #define MSM_DISPLAY_COMPRESSION_RATIO_MAX 5
 
@@ -740,6 +757,24 @@ struct msm_dyn_clk_list {
 	u32 *pixel_clks_khz;
 };
 
+/**
+ * struct msm_display_wd_jitter_config - defines jitter properties for WD timer
+ * @jitter_type:        Type of WD jitter enabled.
+ * @inst_jitter_numer:  Instantaneous jitter numerator.
+ * @inst_jitter_denom:  Instantaneous jitter denominator.
+ * @ltj_max_numer:      LTJ max numerator.
+ * @ltj_max_denom:      LTJ max denominator.
+ * @ltj_time_sec:       LTJ time in seconds.
+ */
+struct msm_display_wd_jitter_config {
+	enum msm_display_wd_jitter_type jitter_type;
+	u32 inst_jitter_numer;
+	u32 inst_jitter_denom;
+	u32 ltj_max_numer;
+	u32 ltj_max_denom;
+	u32 ltj_time_sec;
+};
+
 /**
  * struct msm_mode_info - defines all msm custom mode info
  * @frame_rate:      frame_rate of the mode
@@ -756,10 +791,16 @@ struct msm_dyn_clk_list {
  * @panel_mode_caps   panel mode capabilities
  * @mdp_transfer_time_us   Specifies the mdp transfer time for command mode
  *                         panels in microseconds.
+ * @mdp_transfer_time_us_min   Specifies the minimum possible mdp transfer time
+ *                             for command mode panels in microseconds.
+ * @mdp_transfer_time_us_max   Specifies the maximum possible mdp transfer time
+ *                             for command mode panels in microseconds.
  * @allowed_mode_switches: bit mask to indicate supported mode switch.
  * @disable_rsc_solver: Dynamically disable RSC solver for the timing mode due to lower bitclk rate.
  * @dyn_clk_list: List of dynamic clock rates for RFI.
  * @qsync_min_fps: qsync min fps rate
+ * @wd_jitter:         Info for WD jitter.
+ * @vpadding:        panel stacking height
  */
 struct msm_mode_info {
 	uint32_t frame_rate;
@@ -775,10 +816,14 @@ struct msm_mode_info {
 	bool wide_bus_en;
 	u32 panel_mode_caps;
 	u32 mdp_transfer_time_us;
+	u32 mdp_transfer_time_us_min;
+	u32 mdp_transfer_time_us_max;
 	u32 allowed_mode_switches;
 	bool disable_rsc_solver;
 	struct msm_dyn_clk_list dyn_clk_list;
 	u32 qsync_min_fps;
+	struct msm_display_wd_jitter_config wd_jitter;
+	u32 vpadding;
 };
 
 /**
@@ -1215,8 +1260,10 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
-void msm_framebuffer_set_cache_hint(struct drm_framebuffer *fb, u32 flags, u32 type);
-void msm_framebuffer_get_cache_hint(struct drm_framebuffer *fb, u32 *flags, u32 *type);
+int msm_framebuffer_set_cache_hint(struct drm_framebuffer *fb,
+		u32 flags, u32 rd_type, u32 wr_type);
+int msm_framebuffer_get_cache_hint(struct drm_framebuffer *fb,
+		u32 *flags, u32 *rd_type, u32 *wr_type);
 
 struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
 void msm_fbdev_free(struct drm_device *dev);
@@ -1300,7 +1347,7 @@ static inline void __exit msm_mdp_unregister(void)
 }
 #endif /* CONFIG_DRM_MSM_MDP5 */
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
@@ -1316,7 +1363,7 @@ static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_su
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
 static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 #if IS_ENABLED(CONFIG_DRM_MSM_DSI)
 void __init dsi_display_register(void);

+ 21 - 14
msm/msm_fb.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -30,7 +31,8 @@ struct msm_framebuffer {
 	struct drm_framebuffer base;
 	const struct msm_format *format;
 	u32 cache_flags;
-	u32 cache_type;
+	u32 cache_rd_type;
+	u32 cache_wr_type;
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
@@ -194,7 +196,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
 			mode_cmd->modifier[0]);
 	if (!format) {
-		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+		DISP_DEV_ERR(dev->dev, "unsupported pixel format: %4.4s\n",
 				(char *)&mode_cmd->pixel_format);
 		ret = -EINVAL;
 		goto fail;
@@ -226,7 +228,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
 	if (is_modified) {
 		if (!kms->funcs->check_modified_format) {
-			dev_err(dev->dev, "can't check modified fb format\n");
+			DISP_DEV_ERR(dev->dev, "can't check modified fb format\n");
 			ret = -EINVAL;
 			goto fail;
 		} else {
@@ -269,7 +271,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
 	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
 	if (ret) {
-		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+		DISP_DEV_ERR(dev->dev, "framebuffer init failed: %d\n", ret);
 		goto fail;
 	}
 
@@ -283,29 +285,34 @@ fail:
 	return ERR_PTR(ret);
 }
 
-void msm_framebuffer_set_cache_hint(struct drm_framebuffer *fb, u32 flags, u32 type)
+int msm_framebuffer_set_cache_hint(struct drm_framebuffer *fb,
+		u32 flags, u32 rd_type, u32 wr_type)
 {
 	struct msm_framebuffer *msm_fb;
 
 	if (!fb)
-		return;
+		return -EINVAL;
 
 	msm_fb = to_msm_framebuffer(fb);
 	msm_fb->cache_flags = flags;
-	msm_fb->cache_type = type;
+	msm_fb->cache_rd_type = rd_type;
+	msm_fb->cache_wr_type = wr_type;
+
+	return 0;
 }
 
-void msm_framebuffer_get_cache_hint(struct drm_framebuffer *fb, u32 *flags, u32 *type)
+int  msm_framebuffer_get_cache_hint(struct drm_framebuffer *fb,
+		u32 *flags, u32 *rd_type, u32 *wr_type)
 {
 	struct msm_framebuffer *msm_fb;
 
-	if (!fb) {
-		*flags = 0;
-		*type = 0;
-		return;
-	}
+	if (!fb)
+		return -EINVAL;
 
 	msm_fb = to_msm_framebuffer(fb);
 	*flags = msm_fb->cache_flags;
-	*type = msm_fb->cache_type;
+	*rd_type = msm_fb->cache_rd_type;
+	*wr_type = msm_fb->cache_wr_type;
+
+	return 0;
 }

+ 30 - 15
msm/msm_gem.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -100,7 +101,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 			p = get_pages_vram(obj, npages);
 
 		if (IS_ERR(p)) {
-			dev_err(dev->dev, "could not get pages: %ld\n",
+			DISP_DEV_ERR(dev->dev, "could not get pages: %ld\n",
 					PTR_ERR(p));
 			return p;
 		}
@@ -111,7 +112,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		if (IS_ERR(msm_obj->sgt)) {
 			void *ptr = ERR_CAST(msm_obj->sgt);
 
-			dev_err(dev->dev, "failed to allocate sgt\n");
+			DISP_DEV_ERR(dev->dev, "failed to allocate sgt\n");
 			msm_obj->sgt = NULL;
 			return ptr;
 		}
@@ -324,7 +325,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 	ret = drm_gem_create_mmap_offset(obj);
 
 	if (ret) {
-		dev_err(dev->dev, "could not allocate mmap offset\n");
+		DISP_DEV_ERR(dev->dev, "could not allocate mmap offset\n");
 		return 0;
 	}
 
@@ -451,6 +452,17 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		if ((dev && obj->import_attach) &&
 				((dev != obj->import_attach->dev) ||
 				msm_obj->obj_dirty)) {
+
+			if (of_device_is_compatible(dev->of_node, "qcom,smmu_sde_unsec") &&
+				of_device_is_compatible(obj->import_attach->dev->of_node,
+				"qcom,smmu_sde_sec")) {
+				SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt,
+						 msm_obj->obj_dirty);
+				DRM_ERROR("gem obj found mapped to %s, now requesting map on %s",
+					dev_name(obj->import_attach->dev), dev_name(dev));
+				return -EINVAL;
+			}
+
 			dmabuf = obj->import_attach->dmabuf;
 			dma_map_attrs = obj->import_attach->dma_map_attrs;
 
@@ -653,7 +665,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size  = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
+			MSM_BO_SCANOUT | MSM_BO_CACHED, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -688,7 +700,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	mutex_lock(&msm_obj->lock);
 
 	if (WARN_ON(msm_obj->madv > madv)) {
-		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+		DISP_DEV_ERR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 			msm_obj->madv, madv);
 		mutex_unlock(&msm_obj->lock);
 		return ERR_PTR(-EBUSY);
@@ -968,7 +980,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	case MSM_BO_WC:
 		break;
 	default:
-		dev_err(dev->dev, "invalid cache flag: %x\n",
+		DISP_DEV_ERR(dev->dev, "invalid cache flag: %x\n",
 				(flags & MSM_BO_CACHE_MASK));
 		return -EINVAL;
 	}
@@ -1080,12 +1092,20 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		ret = drm_gem_object_init(dev, obj, size);
 		if (ret)
 			goto fail;
+
+		/*
+		 * Our buffers are kept pinned, so allocating them from the
+		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
+		 * See comments above new_inode() why this is required _and_
+		 * expected if you're going to pin these pages.
+		 */
+		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&priv->mm_lock);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&priv->mm_lock);
 #endif
 
 	return obj;
@@ -1159,7 +1179,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	struct drm_gem_object *obj = NULL;
 	uint32_t size;
 	int ret;
-	unsigned long flags = 0;
 
 	size = PAGE_ALIGN(dmabuf->size);
 
@@ -1189,16 +1208,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	 */
 	msm_obj->flags |= MSM_BO_EXTBUF;
 
-	ret = dma_buf_get_flags(dmabuf, &flags);
-	if (ret)
-		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
-
 	mutex_unlock(&msm_obj->lock);
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&priv->mm_lock);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&priv->mm_lock);
 #endif
 
 	return obj;

+ 7 - 4
msm/msm_kms.h

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -119,10 +120,10 @@ struct msm_kms_funcs {
 	struct device *(*get_address_space_device)(
 			struct msm_kms *kms,
 			unsigned int domain);
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 	/* debugfs: */
 	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
-#endif
+#endif /* CONFIG_DEBUG_FS */
 	/* destroys debugfs */
 	void (*debugfs_destroy)(struct msm_kms *kms);
 	/* handle continuous splash  */
@@ -130,6 +131,8 @@ struct msm_kms_funcs {
 			struct drm_atomic_state *state);
 	/* check for continuous splash status */
 	bool (*check_for_splash)(struct msm_kms *kms);
+	/*trigger null flush if stuck in cont splash*/
+	int (*trigger_null_flush)(struct msm_kms *kms);
 	/* topology lm information */
 	int (*get_mixer_count)(const struct msm_kms *kms,
 			const struct drm_display_mode *mode,
@@ -170,12 +173,12 @@ static inline void msm_kms_init(struct msm_kms *kms,
 	kms->funcs = funcs;
 }
 
-#ifdef CONFIG_DRM_MSM_MDP4
+#if IS_ENABLED(CONFIG_DRM_MSM_MDP4)
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 #else
 static inline
 struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
-#endif
+#endif /* CONFIG_DRM_MSM_MDP4 */
 #if IS_ENABLED(CONFIG_DRM_MSM_MDP5)
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
 int msm_mdss_init(struct drm_device *dev);

+ 6 - 5
msm/msm_smmu.c

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -85,7 +86,7 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char * const *names,
 
 	rc = qcom_iommu_sid_switch(client->dev, SID_ACQUIRE);
 	if (rc) {
-		dev_err(client->dev, "iommu sid switch failed (%d)\n", rc);
+		DISP_DEV_ERR(client->dev, "iommu sid switch failed (%d)\n", rc);
 		return rc;
 	}
 
@@ -415,7 +416,7 @@ static struct device *msm_smmu_device_add(struct device *dev,
 
 	smmu->client = msm_smmu_get_smmu(compat);
 	if (IS_ERR_OR_NULL(smmu->client)) {
-		DRM_ERROR("unable to find domain %d compat: %s\n", domain,
+		DRM_DEBUG("unable to find domain %d compat: %s\n", domain,
 				compat);
 		return ERR_PTR(-ENODEV);
 	}
@@ -517,13 +518,13 @@ static int msm_smmu_probe(struct platform_device *pdev)
 
 	match = of_match_device(msm_smmu_dt_match, &pdev->dev);
 	if (!match || !match->data) {
-		dev_err(&pdev->dev, "probe failed as match data is invalid\n");
+		DISP_DEV_ERR(&pdev->dev, "probe failed as match data is invalid\n");
 		return -EINVAL;
 	}
 
 	domain = match->data;
 	if (!domain) {
-		dev_err(&pdev->dev, "no matching device found\n");
+		DISP_DEV_ERR(&pdev->dev, "no matching device found\n");
 		return -EINVAL;
 	}
 
@@ -536,7 +537,7 @@ static int msm_smmu_probe(struct platform_device *pdev)
 	client->dev = &pdev->dev;
 	client->domain = iommu_get_domain_for_dev(client->dev);
 	if (!client->domain) {
-		dev_err(&pdev->dev, "iommu get domain for dev failed\n");
+		DISP_DEV_ERR(&pdev->dev, "iommu get domain for dev failed\n");
 		return -EINVAL;
 	}
 	client->compat = match->compatible;

+ 46 - 11
msm/sde/sde_color_processing.c

@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
-#define pr_fmt(fmt)	"%s: " fmt, __func__
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
 #include <linux/dma-buf.h>
 #include <linux/string.h>
@@ -836,6 +837,20 @@ static int _set_spr_pu_feature(struct sde_hw_dspp *hw_dspp,
 	return 0;
 }
 
+int sde_dspp_spr_read_opr_value(struct sde_hw_dspp *hw_dspp, u32 *opr_value)
+{
+	int rc;
+
+	if (!opr_value || !hw_dspp || !hw_dspp->ops.read_spr_opr_value)
+		return -EINVAL;
+
+	rc = hw_dspp->ops.read_spr_opr_value(hw_dspp, opr_value);
+	if (rc)
+		SDE_ERROR("invalid opr read %d", rc);
+
+	return rc;
+}
+
 static int _set_demura_pu_feature(struct sde_hw_dspp *hw_dspp,
 	struct sde_hw_cp_cfg *hw_cfg, struct sde_crtc *sde_crtc)
 {
@@ -1649,6 +1664,13 @@ static void _sde_cp_crtc_commit_feature(struct sde_cp_node *prop_node,
 	int i = 0, ret = 0;
 	bool feature_enabled = false;
 	struct sde_mdss_cfg *catalog = NULL;
+	struct sde_crtc_state *sde_crtc_state;
+
+	sde_crtc_state = to_sde_crtc_state(sde_crtc->base.state);
+	if (!sde_crtc_state) {
+		DRM_ERROR("sde_crtc_state is null\n");
+		return;
+	}
 
 	memset(&hw_cfg, 0, sizeof(hw_cfg));
 	_sde_cp_get_cached_payload(prop_node, &hw_cfg, &feature_enabled);
@@ -1661,6 +1683,8 @@ static void _sde_cp_crtc_commit_feature(struct sde_cp_node *prop_node,
 	hw_cfg.skip_blend_plane_h = sde_crtc->skip_blend_plane_h;
 	hw_cfg.skip_blend_plane_w = sde_crtc->skip_blend_plane_w;
 
+	hw_cfg.num_ds_enabled = sde_crtc_state->num_ds_enabled;
+
 	SDE_EVT32(hw_cfg.panel_width, hw_cfg.panel_height);
 
 	for (i = 0; i < num_mixers; i++) {
@@ -1922,6 +1946,11 @@ int sde_cp_crtc_check_properties(struct drm_crtc *crtc,
 		DRM_ERROR("invalid sde_crtc_state %pK\n", sde_crtc_state);
 		return -EINVAL;
 	}
+
+	/* force revalidation of some properties when there is a mode switch */
+	if (state->mode_changed)
+		sde_cp_crtc_res_change(crtc);
+
 	mutex_lock(&sde_crtc->crtc_cp_lock);
 
 	ret = _sde_cp_crtc_check_pu_features(crtc);
@@ -2936,6 +2965,7 @@ static void _dspp_sixzone_install_property(struct drm_crtc *crtc)
 	version = catalog->dspp[0].sblk->sixzone.version >> 16;
 	switch (version) {
 	case 1:
+	case 2:
 		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
 			"SDE_DSPP_PA_SIXZONE_V", version);
 		_sde_cp_crtc_install_blob_property(crtc, feature_name,
@@ -3481,7 +3511,7 @@ static void _sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
 	}
 
 	priv = kms->dev->dev_private;
-	ret = pm_runtime_get_sync(kms->dev->dev);
+	ret = pm_runtime_resume_and_get(kms->dev->dev);
 	if (ret < 0) {
 		SDE_ERROR("failed to enable power resource %d\n", ret);
 		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
@@ -3719,7 +3749,7 @@ static void _sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
 		spin_unlock_irqrestore(&crtc->spin_lock, flags);
 		DRM_DEBUG_DRIVER("cannot find histogram event node in crtc\n");
 		/* unlock histogram */
-		ret = pm_runtime_get_sync(kms->dev->dev);
+		ret = pm_runtime_resume_and_get(kms->dev->dev);
 		if (ret < 0) {
 			SDE_ERROR("failed to enable power resource %d\n", ret);
 			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
@@ -3744,9 +3774,9 @@ static void _sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
 				irq_idx, ret);
 			spin_unlock_irqrestore(&node->state_lock, state_flags);
 			spin_unlock_irqrestore(&crtc->spin_lock, flags);
-			ret = pm_runtime_get_sync(kms->dev->dev);
+			ret = pm_runtime_resume_and_get(kms->dev->dev);
 			if (ret < 0) {
-				SDE_ERROR("failed to enable power %d\n", ret);
+				SDE_ERROR("failed to enable power resource %d\n", ret);
 				SDE_EVT32(ret, SDE_EVTLOG_ERROR);
 				return;
 			}
@@ -3769,7 +3799,7 @@ static void _sde_cp_notify_hist_event(struct drm_crtc *crtc_drm, void *arg)
 	if (!crtc->hist_blob)
 		return;
 
-	ret = pm_runtime_get_sync(kms->dev->dev);
+	ret = pm_runtime_resume_and_get(kms->dev->dev);
 	if (ret < 0) {
 		SDE_ERROR("failed to enable power resource %d\n", ret);
 		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
@@ -4376,6 +4406,8 @@ static void _sde_cp_ltm_hist_interrupt_cb(void *arg, int irq_idx)
 	ltm_data->display_v = hw_cfg.displayv;
 	ltm_data->init_h[0] = phase.init_h[LTM_0];
 	ltm_data->init_h[1] = phase.init_h[LTM_1];
+	ltm_data->init_h[2] = phase.init_h[LTM_2];
+	ltm_data->init_h[3] = phase.init_h[LTM_3];
 	ltm_data->init_v = phase.init_v;
 	ltm_data->inc_v = phase.inc_v;
 	ltm_data->inc_h = phase.inc_h;
@@ -4674,10 +4706,13 @@ void sde_cp_crtc_res_change(struct drm_crtc *crtc_drm)
 	list_for_each_entry_safe(prop_node, n, &crtc->cp_active_list,
 				 cp_active_list) {
 		if (prop_node->feature == SDE_CP_CRTC_DSPP_LTM_INIT ||
-			prop_node->feature == SDE_CP_CRTC_DSPP_LTM_VLUT) {
+			prop_node->feature == SDE_CP_CRTC_DSPP_LTM_VLUT ||
+			prop_node->feature == SDE_CP_CRTC_DSPP_RC_MASK) {
 			list_del_init(&prop_node->cp_active_list);
 			list_add_tail(&prop_node->cp_dirty_list,
 				&crtc->cp_dirty_list);
+
+			SDE_EVT32(prop_node->feature);
 		}
 	}
 	mutex_unlock(&crtc->crtc_cp_lock);
@@ -4865,7 +4900,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
 	if (!num_mixers)
 		return;
 	mutex_lock(&crtc->crtc_cp_lock);
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (info) {
 		for (i = 0; i < ARRAY_SIZE(dspp_cap_update_func); i++)
 			dspp_cap_update_func[i](crtc, info);
@@ -4874,7 +4909,7 @@ void sde_cp_crtc_enable(struct drm_crtc *drm_crtc)
 			info->data, SDE_KMS_INFO_DATALEN(info),
 			CRTC_PROP_DSPP_INFO);
 	}
-	kfree(info);
+	vfree(info);
 	mutex_unlock(&crtc->crtc_cp_lock);
 }
 
@@ -4889,7 +4924,7 @@ void sde_cp_crtc_disable(struct drm_crtc *drm_crtc)
 	}
 	crtc = to_sde_crtc(drm_crtc);
 	mutex_lock(&crtc->crtc_cp_lock);
-	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	info = vzalloc(sizeof(struct sde_kms_info));
 	if (info)
 		msm_property_set_blob(&crtc->property_info,
 				&crtc->dspp_blob_info,
@@ -4900,7 +4935,7 @@ void sde_cp_crtc_disable(struct drm_crtc *drm_crtc)
 	crtc->skip_blend_plane_h = 0;
 	crtc->skip_blend_plane_w = 0;
 	mutex_unlock(&crtc->crtc_cp_lock);
-	kfree(info);
+	vfree(info);
 }
 
 void sde_cp_clear_state_info(struct drm_crtc_state *state)

+ 8 - 0
msm/sde/sde_color_processing.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -366,4 +367,11 @@ void sde_cp_disable_features(struct drm_crtc *crtc);
 void sde_cp_set_skip_blend_plane_info(struct drm_crtc *crtc,
 		struct sde_cp_crtc_skip_blend_plane *skip_blend);
 
+/**
+ * sde_dspp_spr_read_opr_value(): read opr value
+ * @hw_dspp: Pointer to DSPP hardware description.
+ * @opr_value: Pointer to opr value.
+ */
+int sde_dspp_spr_read_opr_value(struct sde_hw_dspp *hw_dspp, u32 *opr_value);
+
 #endif /*_SDE_COLOR_PROCESSING_H */

+ 139 - 27
msm/sde/sde_connector.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -809,6 +810,10 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
 	}
 
 	bl_config = &dsi_display->panel->bl_config;
+	bl_config->bl_scale = c_conn->bl_scale > MAX_BL_SCALE_LEVEL ?
+			MAX_BL_SCALE_LEVEL : c_conn->bl_scale;
+	bl_config->bl_scale_sv = c_conn->bl_scale_sv > SV_BL_SCALE_CAP ?
+			SV_BL_SCALE_CAP : c_conn->bl_scale_sv;
 
 	if (!c_conn->allow_bl_update) {
 		c_conn->unset_bl_level = bl_config->bl_level;
@@ -818,11 +823,6 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
 	if (c_conn->unset_bl_level)
 		bl_config->bl_level = c_conn->unset_bl_level;
 
-	bl_config->bl_scale = c_conn->bl_scale > MAX_BL_SCALE_LEVEL ?
-			MAX_BL_SCALE_LEVEL : c_conn->bl_scale;
-	bl_config->bl_scale_sv = c_conn->bl_scale_sv > SV_BL_SCALE_CAP ?
-			SV_BL_SCALE_CAP : c_conn->bl_scale_sv;
-
 	SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
 		bl_config->bl_scale, bl_config->bl_scale_sv,
 		bl_config->bl_level);
@@ -1112,7 +1112,7 @@ void sde_connector_helper_bridge_disable(struct drm_connector *connector)
 	/* Disable ESD thread */
 	sde_connector_schedule_status_work(connector, false);
 
-	if (!sde_in_trusted_vm(sde_kms) && c_conn->bl_device) {
+	if (!sde_in_trusted_vm(sde_kms) && c_conn->bl_device && !poms_pending) {
 		c_conn->bl_device->props.power = FB_BLANK_POWERDOWN;
 		c_conn->bl_device->props.state |= BL_CORE_FBBLANK;
 		backlight_update_status(c_conn->bl_device);
@@ -1156,7 +1156,7 @@ void sde_connector_helper_bridge_enable(struct drm_connector *connector)
 				MSM_ENC_TX_COMPLETE);
 	c_conn->allow_bl_update = true;
 
-	if (!sde_in_trusted_vm(sde_kms) && c_conn->bl_device) {
+	if (!sde_in_trusted_vm(sde_kms) && c_conn->bl_device && !display->poms_pending) {
 		c_conn->bl_device->props.power = FB_BLANK_UNBLANK;
 		c_conn->bl_device->props.state &= ~BL_CORE_FBBLANK;
 		backlight_update_status(c_conn->bl_device);
@@ -1637,6 +1637,7 @@ static int _sde_connector_set_prop_retire_fence(struct drm_connector *connector,
 	struct sde_connector *c_conn;
 	uint64_t fence_user_fd;
 	uint64_t __user prev_user_fd;
+	struct sde_hw_ctl *hw_ctl = NULL;
 
 	c_conn = to_sde_connector(connector);
 
@@ -1662,8 +1663,13 @@ static int _sde_connector_set_prop_retire_fence(struct drm_connector *connector,
 		 * commit completion
 		 */
 		offset++;
+
+		/* get hw_ctl for a wb connector */
+		if (c_conn->connector_type == DRM_MODE_CONNECTOR_VIRTUAL)
+			hw_ctl = sde_encoder_get_hw_ctl(c_conn);
+
 		rc = sde_fence_create(c_conn->retire_fence,
-					&fence_user_fd, offset);
+					&fence_user_fd, offset, hw_ctl);
 		if (rc) {
 			SDE_ERROR("fence create failed rc:%d\n", rc);
 			goto end;
@@ -1686,6 +1692,20 @@ end:
 	return rc;
 }
 
+static int _sde_connector_set_prop_dyn_transfer_time(struct sde_connector *c_conn, uint64_t val)
+{
+	int rc = 0;
+
+	if (!c_conn->ops.update_transfer_time)
+		return rc;
+
+	rc = c_conn->ops.update_transfer_time(c_conn->display, val);
+	if (rc)
+		SDE_ERROR_CONN(c_conn, "updating transfer time failed, val: %u, rc %d\n", val, rc);
+
+	return rc;
+}
+
 static int sde_connector_atomic_set_property(struct drm_connector *connector,
 		struct drm_connector_state *state,
 		struct drm_property *property,
@@ -1777,6 +1797,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
 			SDE_ERROR_CONN(c_conn, "dynamic bit clock set failed, rc: %d", rc);
 
 		break;
+	case CONNECTOR_PROP_DYN_TRANSFER_TIME:
+		_sde_connector_set_prop_dyn_transfer_time(c_conn, val);
+		break;
+	case CONNECTOR_PROP_LP:
+		/* suspend case: clear stale MISR */
+		if (val == SDE_MODE_DPMS_OFF)
+			memset(&c_conn->previous_misr_sign, 0, sizeof(struct sde_misr_sign));
+		break;
 	default:
 		break;
 	}
@@ -1868,19 +1896,26 @@ void sde_connector_complete_commit(struct drm_connector *connector,
 
 	/* signal connector's retire fence */
 	sde_fence_signal(to_sde_connector(connector)->retire_fence,
-			ts, fence_event);
+			ts, fence_event, NULL);
 }
 
 void sde_connector_commit_reset(struct drm_connector *connector, ktime_t ts)
 {
+	struct sde_hw_ctl *hw_ctl = NULL;
+	struct sde_connector *c_conn;
+
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
 		return;
 	}
+	c_conn = to_sde_connector(connector);
+
+	/* get hw_ctl for a wb connector */
+	if (c_conn->connector_type == DRM_MODE_CONNECTOR_VIRTUAL)
+		hw_ctl = sde_encoder_get_hw_ctl(c_conn);
 
 	/* signal connector's retire fence */
-	sde_fence_signal(to_sde_connector(connector)->retire_fence,
-			ts, SDE_FENCE_RESET_TIMELINE);
+	sde_fence_signal(c_conn->retire_fence, ts, SDE_FENCE_RESET_TIMELINE, hw_ctl);
 }
 
 static void sde_connector_update_hdr_props(struct drm_connector *connector)
@@ -1916,7 +1951,7 @@ static void sde_connector_update_colorspace(struct drm_connector *connector)
 }
 
 static int
-sde_connector_detect_ctx(struct drm_connector *connector, 
+sde_connector_detect_ctx(struct drm_connector *connector,
 		struct drm_modeset_acquire_ctx *ctx,
 		bool force)
 {
@@ -2073,11 +2108,19 @@ static int _sde_connector_lm_preference(struct sde_connector *sde_conn,
 		return -EINVAL;
 	}
 
-	sde_hw_mixer_set_preference(sde_kms->catalog, num_lm, disp_type);
+	sde_conn->lm_mask = sde_hw_mixer_set_preference(sde_kms->catalog,
+							num_lm, disp_type);
 
 	return ret;
 }
 
+static void _sde_connector_init_hw_fence(struct sde_connector *c_conn, struct sde_kms *sde_kms)
+{
+	/* Enable hw-fences for wb retire-fence */
+	if (c_conn->connector_type == DRM_MODE_CONNECTOR_VIRTUAL && sde_kms->catalog->hw_fence_rev)
+		c_conn->hwfence_wb_retire_fences_enable = true;
+}
+
 int sde_connector_get_panel_vfp(struct drm_connector *connector,
 	struct drm_display_mode *mode)
 {
@@ -2422,7 +2465,7 @@ static const struct file_operations conn_cmd_rx_fops = {
 	.write =        _sde_debugfs_conn_cmd_rx_write,
 };
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 /**
  * sde_connector_init_debugfs - initialize connector debugfs
  * @connector: Pointer to drm connector
@@ -2431,12 +2474,19 @@ static int sde_connector_init_debugfs(struct drm_connector *connector)
 {
 	struct sde_connector *sde_connector;
 	struct msm_display_info info;
+	struct sde_kms *sde_kms;
 
 	if (!connector || !connector->debugfs_entry) {
 		SDE_ERROR("invalid connector\n");
 		return -EINVAL;
 	}
 
+	sde_kms = sde_connector_get_kms(connector);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
 	sde_connector = to_sde_connector(connector);
 
 	sde_connector_get_info(connector, &info);
@@ -2465,6 +2515,11 @@ static int sde_connector_init_debugfs(struct drm_connector *connector)
 		}
 	}
 
+	if (sde_connector->connector_type == DRM_MODE_CONNECTOR_VIRTUAL &&
+			sde_kms->catalog->hw_fence_rev)
+		debugfs_create_bool("wb_hw_fence_enable", 0600, connector->debugfs_entry,
+			&sde_connector->hwfence_wb_retire_fences_enable);
+
 	return 0;
 }
 #else
@@ -2472,7 +2527,7 @@ static int sde_connector_init_debugfs(struct drm_connector *connector)
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 static int sde_connector_late_register(struct drm_connector *connector)
 {
@@ -2610,18 +2665,32 @@ sde_connector_best_encoder(struct drm_connector *connector)
 static struct drm_encoder *
 sde_connector_atomic_best_encoder(struct drm_connector *connector,
 		struct drm_atomic_state *state)
+{
+	struct sde_connector *c_conn;
+	struct drm_encoder *encoder = NULL;
+	struct drm_connector_state *connector_state = NULL;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return NULL;
+	}
+
+	connector_state = drm_atomic_get_new_connector_state(state, connector);
+	c_conn = to_sde_connector(connector);
+
+	if (c_conn->ops.atomic_best_encoder)
+		encoder = c_conn->ops.atomic_best_encoder(connector,
+				c_conn->display, connector_state);
+
+	return encoder;
+}
 #else
 static struct drm_encoder *
 sde_connector_atomic_best_encoder(struct drm_connector *connector,
 		struct drm_connector_state *connector_state)
-#endif
 {
 	struct sde_connector *c_conn;
 	struct drm_encoder *encoder = NULL;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
-	struct drm_connector_state *connector_state =
-			drm_atomic_get_new_connector_state(state, connector);
-#endif
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
@@ -2636,6 +2705,7 @@ sde_connector_atomic_best_encoder(struct drm_connector *connector,
 
 	return encoder;
 }
+#endif
 
 static int sde_connector_atomic_check(struct drm_connector *connector,
 		struct drm_atomic_state *state)
@@ -2764,7 +2834,7 @@ static void sde_connector_check_status_work(struct work_struct *work)
 	dev = conn->base.dev->dev;
 
 	if (!conn->ops.check_status || dev->power.is_suspended ||
-			(conn->dpms_mode != DRM_MODE_DPMS_ON)) {
+			(conn->lp_mode == SDE_MODE_DPMS_OFF)) {
 		SDE_DEBUG("dpms mode: %d\n", conn->dpms_mode);
 		mutex_unlock(&conn->lock);
 		return;
@@ -2864,6 +2934,13 @@ static int sde_connector_populate_mode_info(struct drm_connector *conn,
 		sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
 			mode_info.mdp_transfer_time_us);
 
+		if (mode_info.mdp_transfer_time_us_min && mode_info.mdp_transfer_time_us_max) {
+			sde_kms_info_add_keyint(info, "mdp_transfer_time_us_min",
+					mode_info.mdp_transfer_time_us_min);
+			sde_kms_info_add_keyint(info, "mdp_transfer_time_us_max",
+					mode_info.mdp_transfer_time_us_max);
+		}
+
 		sde_kms_info_add_keyint(info, "allowed_mode_switch",
 			mode_info.allowed_mode_switches);
 
@@ -2908,7 +2985,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
 		return -EINVAL;
 	}
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	info = vzalloc(sizeof(*info));
 	if (!info)
 		return -ENOMEM;
 
@@ -2966,7 +3043,7 @@ int sde_connector_set_blob_data(struct drm_connector *conn,
 			SDE_KMS_INFO_DATALEN(info),
 			prop_id);
 exit:
-	kfree(info);
+	vfree(info);
 
 	return rc;
 }
@@ -3029,6 +3106,8 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 			msm_property_install_range(&c_conn->property_info, "dyn_bit_clk",
 					0x0, 0, ~0, 0, CONNECTOR_PROP_DYN_BIT_CLK);
 
+		msm_property_install_range(&c_conn->property_info, "dyn_transfer_time",
+				 0x0, 0, 1000000, 0, CONNECTOR_PROP_DYN_TRANSFER_TIME);
 
 		mutex_lock(&c_conn->base.dev->mode_config.mutex);
 		sde_connector_fill_modes(&c_conn->base,
@@ -3095,12 +3174,14 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 		msm_property_install_enum(&c_conn->property_info, "dsc_mode", 0,
 			0, e_dsc_mode, ARRAY_SIZE(e_dsc_mode), 0, CONNECTOR_PROP_DSC_MODE);
 
-		if (display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE &&
+		if (dsi_display && dsi_display->panel &&
+			display_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE &&
 			display_info->capabilities & MSM_DISPLAY_CAP_VID_MODE)
 			msm_property_install_enum(&c_conn->property_info,
 			"panel_mode", 0, 0,
 			e_panel_mode,
-			ARRAY_SIZE(e_panel_mode), 0,
+			ARRAY_SIZE(e_panel_mode),
+			(dsi_display->panel->panel_mode == DSI_OP_VIDEO_MODE) ? 0 : 1,
 			CONNECTOR_PROP_SET_PANEL_MODE);
 
 		if (test_bit(SDE_FEATURE_DEMURA, sde_kms->catalog->features)) {
@@ -3305,8 +3386,11 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
 	_sde_connector_lm_preference(c_conn, sde_kms,
 			display_info.display_type);
 
-	SDE_DEBUG("connector %d attach encoder %d\n",
-			c_conn->base.base.id, encoder->base.id);
+	_sde_connector_init_hw_fence(c_conn, sde_kms);
+
+	SDE_DEBUG("connector %d attach encoder %d, wb hwfences:%d\n",
+			DRMID(&c_conn->base), DRMID(encoder),
+			c_conn->hwfence_wb_retire_fences_enable);
 
 	INIT_DELAYED_WORK(&c_conn->status_work,
 			sde_connector_check_status_work);
@@ -3372,11 +3456,21 @@ int sde_connector_register_custom_event(struct sde_kms *kms,
 		c_conn->dimming_bl_notify_enabled = val;
 		ret = 0;
 		break;
+	case DRM_EVENT_MISR_SIGN:
+		if (!conn_drm) {
+			SDE_ERROR("invalid connector\n");
+			return -EINVAL;
+		}
+		c_conn = to_sde_connector(conn_drm);
+		c_conn->misr_event_notify_enabled = val;
+		ret = sde_encoder_register_misr_event(c_conn->encoder, val);
+		break;
 	case DRM_EVENT_PANEL_DEAD:
 		ret = 0;
 		break;
 	case DRM_EVENT_SDE_HW_RECOVERY:
 		ret = _sde_conn_enable_hw_recovery(conn_drm);
+		sde_dbg_update_dump_mode(val);
 		break;
 	default:
 		break;
@@ -3400,6 +3494,7 @@ int sde_connector_event_notify(struct drm_connector *connector, uint32_t type,
 	case DRM_EVENT_DIMMING_BL:
 	case DRM_EVENT_PANEL_DEAD:
 	case DRM_EVENT_SDE_HW_RECOVERY:
+	case DRM_EVENT_MISR_SIGN:
 		ret = 0;
 		break;
 	default:
@@ -3419,3 +3514,20 @@ int sde_connector_event_notify(struct drm_connector *connector, uint32_t type,
 
 	return ret;
 }
+
+bool sde_connector_is_line_insertion_supported(struct sde_connector *sde_conn)
+{
+	struct dsi_display *display = NULL;
+
+	if (!sde_conn)
+		return false;
+
+	if (sde_conn->connector_type != DRM_MODE_CONNECTOR_DSI)
+		return false;
+
+	display = (struct dsi_display *)sde_conn->display;
+	if (!display || !display->panel)
+		return false;
+
+	return display->panel->host_config.line_insertion_enable;
+}

+ 57 - 0
msm/sde/sde_connector.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -430,6 +431,13 @@ struct sde_connector_ops {
 	 */
 	int (*get_num_lm_from_mode)(void *display, const struct drm_display_mode *mode);
 
+	/*
+	 * update_transfer_time - Update transfer time
+	 * @display: Pointer to private display structure
+	 * @transfer_time: new transfer time to be updated
+	 */
+	int (*update_transfer_time)(void *display, u32 transfer_time);
+
 };
 
 /**
@@ -474,6 +482,18 @@ struct sde_connector_dyn_hdr_metadata {
 	bool dynamic_hdr_update;
 };
 
+/**
+ * struct sde_misr_sign - defines sde misr signature structure
+ * @num_valid_misr : count of valid misr signature
+ * @roi_list : list of roi
+ * @misr_sign_value : list of misr signature
+ */
+struct sde_misr_sign {
+	atomic64_t num_valid_misr;
+	struct msm_roi_list roi_list;
+	u64 misr_sign_value[MAX_DSI_DISPLAYS];
+};
+
 /**
  * struct sde_connector - local sde connector structure
  * @base: Base drm connector structure
@@ -522,6 +542,7 @@ struct sde_connector_dyn_hdr_metadata {
  * @hdr_min_luminance: desired min luminance obtained from HDR block
  * @hdr_supported: does the sink support HDR content
  * @color_enc_fmt: Colorimetry encoding formats of sink
+ * @lm_mask: preferred LM mask for connector
  * @allow_bl_update: Flag to indicate if BL update is allowed currently or not
  * @dimming_bl_notify_enabled: Flag to indicate if dimming bl notify is enabled or not
  * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
@@ -533,6 +554,9 @@ struct sde_connector_dyn_hdr_metadata {
  * @cmd_rx_buf: the return buffer of response of command transfer
  * @rx_len: the length of dcs command received buffer
  * @cached_edid: cached edid data for the connector
+ * @misr_event_notify_enabled: Flag to indicate if misr event notify is enabled or not
+ * @previous_misr_sign: store previous misr signature
+ * @hwfence_wb_retire_fences_enable: enable hw-fences for wb retire-fence
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -593,6 +617,7 @@ struct sde_connector {
 	bool hdr_supported;
 
 	u32 color_enc_fmt;
+	u32 lm_mask;
 
 	u8 hdr_plus_app_ver;
 	u32 qsync_mode;
@@ -608,6 +633,10 @@ struct sde_connector {
 	int rx_len;
 
 	struct edid *cached_edid;
+	bool misr_event_notify_enabled;
+	struct sde_misr_sign previous_misr_sign;
+
+	bool hwfence_wb_retire_fences_enable;
 };
 
 /**
@@ -682,6 +711,7 @@ struct sde_connector {
  * @dnsc_blur_count: Number of downscale blur blocks used
  * @dnsc_blur_cfg: Configs for the downscale blur block
  * @dnsc_blur_lut: LUT idx used for the Gaussian filter LUTs in downscale blur block
+ * @usage_type: WB connector usage type
  */
 struct sde_connector_state {
 	struct drm_connector_state base;
@@ -702,6 +732,7 @@ struct sde_connector_state {
 	u32 dnsc_blur_count;
 	struct sde_drm_dnsc_blur_cfg dnsc_blur_cfg[DNSC_BLUR_MAX_COUNT];
 	u32 dnsc_blur_lut;
+	enum sde_wb_usage_type usage_type;
 };
 
 /**
@@ -1186,6 +1217,24 @@ static inline int sde_connector_state_get_compression_info(
 	return 0;
 }
 
+static inline bool sde_connector_is_3d_merge_enabled(struct drm_connector *conn)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!conn)
+		return false;
+
+	topology = sde_connector_get_topology_name(conn);
+	if ((topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+			|| (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC)
+			|| (topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_VDC)
+			|| (topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE)
+			|| (topology == SDE_RM_TOPOLOGY_QUADPIPE_3DMERGE_DSC))
+		return true;
+
+	return false;
+}
+
 /**
 * sde_connector_set_msm_mode - set msm_mode for connector state
 * @conn_state: Pointer to drm connector state structure
@@ -1265,4 +1314,12 @@ int sde_connector_esd_status(struct drm_connector *connector);
 const char *sde_conn_get_topology_name(struct drm_connector *conn,
 		struct msm_display_topology topology);
 
+/*
+ * sde_connector_is_line_insertion_supported - get line insertion
+ * feature bit value from panel
+ * @sde_conn:    Pointer to sde connector structure
+ * @Return: line insertion support status
+ */
+bool sde_connector_is_line_insertion_supported(struct sde_connector *sde_conn);
+
 #endif /* _SDE_CONNECTOR_H_ */

+ 5 - 4
msm/sde/sde_core_irq.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -352,7 +353,7 @@ static void sde_disable_all_irqs(struct sde_kms *sde_kms)
 	sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
 }
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 #define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
 static int __prefix ## _open(struct inode *inode, struct file *file)	\
 {									\
@@ -423,7 +424,7 @@ int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
 void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
 {
 }
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 void sde_core_irq_preinstall(struct sde_kms *sde_kms)
 {
@@ -436,7 +437,7 @@ void sde_core_irq_preinstall(struct sde_kms *sde_kms)
 	}
 
 	if (!sde_in_trusted_vm(sde_kms)) {
-		rc = pm_runtime_get_sync(sde_kms->dev->dev);
+		rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
 		if (rc < 0) {
 			SDE_ERROR("failed to enable power resource %d\n", rc);
 			SDE_EVT32(rc, SDE_EVTLOG_ERROR);
@@ -489,7 +490,7 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
 		return;
 	}
 
-	rc = pm_runtime_get_sync(sde_kms->dev->dev);
+	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
 	if (rc < 0) {
 		SDE_ERROR("failed to enable power resource %d\n", rc);
 		SDE_EVT32(rc, SDE_EVTLOG_ERROR);

+ 21 - 15
msm/sde/sde_core_perf.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -12,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/bitmap.h>
 #include <linux/sde_rsc.h>
-#include <linux/platform_device.h>
 #include <linux/soc/qcom/llcc-qcom.h>
 
 #include "msm_prop.h"
@@ -327,7 +327,7 @@ static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
 	struct drm_device *drm_dev;
 	struct device *dev;
 	struct platform_device *pdev;
-	u32 llcc_id[SDE_SYS_CACHE_MAX] = {LLCC_DISP};
+	u32 scid;
 	int rc = 0;
 
 	if (!kms || !kms->dev || !kms->dev->dev) {
@@ -341,7 +341,6 @@ static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
 	pdev = to_platform_device(dev);
 
 	/* If LLCC is already in the requested state, skip */
-	SDE_EVT32(activate, type, kms->perf.llcc_active[type]);
 	if ((activate && kms->perf.llcc_active[type]) ||
 		(!activate && !kms->perf.llcc_active[type])) {
 		SDE_DEBUG("skip llcc type:%d request:%d state:%d\n",
@@ -349,18 +348,19 @@ static int _sde_core_perf_activate_llcc(struct sde_kms *kms,
 		goto exit;
 	}
 
-	SDE_DEBUG("%sactivate the llcc type:%d state:%d\n",
-		activate ? "" : "de",
-		type, kms->perf.llcc_active[type]);
-
-	slice = llcc_slice_getd(llcc_id[type]);
+	slice = llcc_slice_getd(kms->catalog->sc_cfg[type].llcc_uid);
 	if (IS_ERR_OR_NULL(slice))  {
 		SDE_ERROR("failed to get llcc slice for uid:%d\n",
-				llcc_id[type]);
+				kms->catalog->sc_cfg[type].llcc_uid);
 		rc = -EINVAL;
 		goto exit;
 	}
 
+	scid = llcc_get_slice_id(slice);
+	SDE_EVT32(activate, type, kms->perf.llcc_active[type], scid);
+	SDE_DEBUG("%sactivate the llcc type:%d state:%d scid:%d\n", activate ? "" : "de", type,
+			kms->perf.llcc_active[type], scid);
+
 	if (activate) {
 		llcc_slice_activate(slice);
 		kms->perf.llcc_active[type] = true;
@@ -383,15 +383,13 @@ static void _sde_core_perf_crtc_set_llcc_cache_type(struct sde_kms *kms,
 {
 	struct drm_crtc *tmp_crtc;
 	struct sde_crtc *sde_crtc;
-	struct sde_sc_cfg *sc_cfg = kms->perf.catalog->sc_cfg;
 	struct sde_core_perf_params *cur_perf;
 	enum sde_crtc_client_type curr_client_type
 					= sde_crtc_get_client_type(crtc);
 	u32 llcc_active = 0;
 
-	if (!sc_cfg[type].has_sys_cache) {
-		SDE_DEBUG("System Cache %d is not enabled!. Won't use\n",
-				type);
+	if (!test_bit(type, kms->perf.catalog->sde_sys_cache_type_map)) {
+		SDE_DEBUG("system cache %d is not enabled!. Won't use\n", type);
 		return;
 	}
 
@@ -1083,7 +1081,7 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
 
 }
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 
 static ssize_t _sde_core_perf_threshold_high_write(struct file *file,
 		    const char __user *user_buf, size_t count, loff_t *ppos)
@@ -1385,6 +1383,14 @@ int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
 
 	debugfs_create_u32("uidle_perf_cnt", 0600, perf->debugfs_root,
 			&sde_kms->catalog->uidle_cfg.debugfs_perf);
+	debugfs_create_u32("uidle_fal10_target_idle_time_us", 0600, perf->debugfs_root,
+			&sde_kms->catalog->uidle_cfg.fal10_target_idle_time);
+	debugfs_create_u32("uidle_fal1_target_idle_time_us", 0600, perf->debugfs_root,
+			&sde_kms->catalog->uidle_cfg.fal1_target_idle_time);
+	debugfs_create_u32("uidle_fal10_threshold_us", 0600, perf->debugfs_root,
+			&sde_kms->catalog->uidle_cfg.fal10_threshold);
+	debugfs_create_u32("uidle_fal1_max_threshold", 0600, perf->debugfs_root,
+			&sde_kms->catalog->uidle_cfg.fal1_max_threshold);
 	debugfs_create_bool("uidle_enable", 0600, perf->debugfs_root,
 			&sde_kms->catalog->uidle_cfg.debugfs_ctrl);
 
@@ -1400,7 +1406,7 @@ int sde_core_perf_debugfs_init(struct sde_core_perf *perf,
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 void sde_core_perf_destroy(struct sde_core_perf *perf)
 {

File diff suppressed because it is too large
+ 587 - 66
msm/sde/sde_crtc.c


+ 87 - 2
msm/sde/sde_crtc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -232,6 +232,32 @@ struct sde_frame_data {
 	struct sde_frame_data_buffer *buf[SDE_FRAME_DATA_BUFFER_MAX];
 };
 
+/**
+ * struct sde_opr_value - defines sde opr value structure
+ * @num_valid_opr : count of valid opr values
+ * @opr_value : list of opr value
+ */
+struct sde_opr_value {
+	atomic_t num_valid_opr;
+	u32 opr_value[MAX_DSI_DISPLAYS];
+};
+
+/**
+ * enum sde_crtc_hw_fence_flags - flags to enable/disable hw fence features
+ * @HW_FENCE_OUT_FENCES_ENABLE: enables creation of hw fences for crtc output fences
+ * @HW_FENCE_IN_FENCES_ENABLE: enables hw fences for input-fences that are candidates for hw wait
+ *                   (i.e. they have the dma-fence flag for dma-fences set), this allows to
+ *                   selectively enable/disable input-fences, regardless of the dma-fence flags.
+ * @HW_FENCE_IN_FENCES_NO_OVERRIDE: skip the sw-override of the input hw-fences signal.
+ * @HW_FENCE_FEATURES_MAX: max number of features.
+ */
+enum sde_crtc_hw_fence_flags {
+	HW_FENCE_OUT_FENCES_ENABLE,
+	HW_FENCE_IN_FENCES_ENABLE,
+	HW_FENCE_IN_FENCES_NO_OVERRIDE,
+	HW_FENCE_FEATURES_MAX,
+};
+
 /**
  * struct sde_crtc - virtualized CRTC data structure
  * @base          : Base drm crtc structure
@@ -301,12 +327,14 @@ struct sde_frame_data {
  * @ltm_buffer_lock : muttx to protect ltm_buffers allcation and free
  * @ltm_lock        : Spinlock to protect ltm buffer_cnt, hist_en and ltm lists
  * @needs_hw_reset  : Initiate a hw ctl reset
+ * @reinit_crtc_mixers : Reinitialize mixers in crtc
  * @hist_irq_idx    : hist interrupt irq idx
  * @disable_pending_cp : flag tracks pending color processing features force disable
  * @src_bpp         : source bpp used to calculate compression ratio
  * @target_bpp      : target bpp used to calculate compression ratio
  * @static_cache_read_work: delayed worker to transition cache state to read
  * @cache_state     : Current static image cache state
+ * @cache_type      : Current static image cache type to use
  * @dspp_blob_info  : blob containing dspp hw capability information
  * @cached_encoder_mask : cached encoder_mask for vblank work
  * @valid_skip_blend_plane: flag to indicate if skip blend plane is valid
@@ -315,6 +343,12 @@ struct sde_frame_data {
  * @skip_blend_plane_h: skip blend plane height
  * @line_time_in_ns : current mode line time in nano sec is needed for QOS update
  * @frame_data      : Framedata data structure
+ * @previous_opr_value : store previous opr values
+ * @opr_event_notify_enabled : Flag to indicate if opr event notify is enabled or not
+ * @hwfence_features_mask : u32 mask to enable/disable hw fence features. See enum
+ *                          sde_crtc_hw_fence_flags for available fields.
+ * @hwfence_out_fences_skip: number of frames to skip before creating a new hw-fence; this can be
+ *                   used to slow-down creation of output hw-fences for debugging purposes.
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -400,6 +434,7 @@ struct sde_crtc {
 	struct mutex ltm_buffer_lock;
 	spinlock_t ltm_lock;
 	bool needs_hw_reset;
+	bool reinit_crtc_mixers;
 	int hist_irq_idx;
 	bool disable_pending_cp;
 
@@ -408,6 +443,7 @@ struct sde_crtc {
 
 	struct kthread_delayed_work static_cache_read_work;
 	enum sde_sys_cache_state cache_state;
+	enum sde_sys_cache_type cache_type;
 
 	struct drm_property_blob *dspp_blob_info;
 	u32 cached_encoder_mask;
@@ -419,18 +455,37 @@ struct sde_crtc {
 	u32 line_time_in_ns;
 
 	struct sde_frame_data frame_data;
+
+	struct sde_opr_value previous_opr_value;
+	bool opr_event_notify_enabled;
+
+	DECLARE_BITMAP(hwfence_features_mask, HW_FENCE_FEATURES_MAX);
+	u32 hwfence_out_fences_skip;
 };
 
 enum sde_crtc_dirty_flags {
 	SDE_CRTC_DIRTY_DEST_SCALER,
 	SDE_CRTC_DIRTY_DIM_LAYERS,
 	SDE_CRTC_NOISE_LAYER,
-	SDE_CRTC_DIRTY_UIDLE,
 	SDE_CRTC_DIRTY_MAX,
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
 
+/**
+ * struct sde_line_insertion_param - sde line insertion parameters
+ * @panel_line_insertion_enable: line insertion support status
+ * @padding_height: panel height after line padding
+ * @padding_active: active lines in panel stacking pattern
+ * @padding_dummy: dummy lines in panel stacking pattern
+ */
+struct sde_line_insertion_param {
+	bool panel_line_insertion_enable;
+	u32 padding_height;
+	u32 padding_active;
+	u32 padding_dummy;
+};
+
 /**
  * struct sde_crtc_state - sde container for atomic crtc state
  * @base: Base drm crtc state structure
@@ -466,6 +521,8 @@ enum sde_crtc_dirty_flags {
  * @cp_dirty_list: array tracking features that are dirty
  * @cp_range_payload: array storing state user_data passed via range props
  * @cont_splash_populated: State was populated as part of cont. splash
+ * @param: sde line insertion parameters
+ * @hwfence_in_fences_set: input hw fences are configured for the commit
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -505,6 +562,8 @@ struct sde_crtc_state {
 	struct sde_cp_crtc_range_prop_payload
 		cp_range_payload[SDE_CP_CRTC_MAX_FEATURES];
 	bool cont_splash_populated;
+	struct sde_line_insertion_param line_insertion;
+	bool hwfence_in_fences_set;
 };
 
 enum sde_crtc_irq_state {
@@ -720,6 +779,12 @@ u32 sde_crtc_get_fps_mode(struct drm_crtc *crtc);
  */
 u32 sde_crtc_get_dfps_maxfps(struct drm_crtc *crtc);
 
+/**
+ * sde_crtc_get_wb_usage_type - get writeback usage type
+ * @crtc: Pointert to crtc
+ */
+enum sde_wb_usage_type sde_crtc_get_wb_usage_type(struct drm_crtc *crtc);
+
 /**
  * sde_crtc_get_client_type - check the crtc type- rt, rsc_rt, etc.
  * @crtc: Pointer to crtc
@@ -1066,4 +1131,24 @@ struct drm_encoder *sde_crtc_get_src_encoder_of_clone(struct drm_crtc *crtc);
  */
 void _sde_crtc_vm_release_notify(struct drm_crtc *crtc);
 
+/*
+ * sde_crtc_is_line_insertion_supported - get line insertion
+ * feature bit value from panel
+ * @crtc:    Pointer to drm crtc structure
+ * @Return: line insertion support status
+ */
+bool sde_crtc_is_line_insertion_supported(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_calc_vpadding_param - calculate vpadding parameters
+ * @state: Pointer to DRM crtc state object
+ * @crtc_y: Plane's CRTC_Y offset
+ * @crtc_h: Plane's CRTC_H size
+ * @padding_y: Padding Y offset
+ * @padding_start: Padding start offset
+ * @padding_height: Padding height in total
+ */
+void sde_crtc_calc_vpadding_param(struct drm_crtc_state *state, u32 crtc_y, u32 crtc_h,
+				  u32 *padding_y, u32 *padding_start, u32 *padding_height);
+
 #endif /* _SDE_CRTC_H_ */

+ 497 - 114
msm/sde/sde_encoder.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -44,6 +44,7 @@
 #include "sde_hw_qdss.h"
 #include "sde_encoder_dce.h"
 #include "sde_vm.h"
+#include "sde_fence.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -77,6 +78,9 @@
 /* Maximum number of VSYNC wait attempts for RSC state transition */
 #define MAX_RSC_WAIT	5
 
+#define IS_ROI_UPDATED(a, b) (a.x1 != b.x1 || a.x2 != b.x2 || \
+			a.y1 != b.y1 || a.y2 != b.y2)
+
 /**
  * enum sde_enc_rc_events - events for resource control state machine
  * @SDE_ENC_RC_EVENT_KICKOFF:
@@ -141,8 +145,10 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable) {
-			SDE_EVT32(DRMID(drm_enc), enable);
+		if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable &&
+				phys->split_role != ENC_ROLE_SLAVE) {
+			if (enable)
+				SDE_EVT32(DRMID(drm_enc), enable);
 			phys->hw_ctl->ops.uidle_enable(phys->hw_ctl, enable);
 		}
 	}
@@ -157,15 +163,16 @@ ktime_t sde_encoder_calc_last_vsync_timestamp(struct drm_encoder *drm_enc)
 	struct intf_status intf_status = {0};
 	unsigned long features;
 	u32 fps;
+	bool is_cmd, is_vid;
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
 	cur_master = sde_enc->cur_master;
 	fps = sde_encoder_get_fps(drm_enc);
+	is_cmd = sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE);
+	is_vid = sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE);
 
 	if (!cur_master || !cur_master->hw_intf || !fps
-		|| !cur_master->hw_intf->ops.get_vsync_timestamp
-		|| (!sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE)
-			&& !sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE)))
+			|| !cur_master->hw_intf->ops.get_vsync_timestamp || (!is_cmd && !is_vid))
 		return 0;
 
 	features = cur_master->hw_intf->cap->features;
@@ -181,7 +188,7 @@ ktime_t sde_encoder_calc_last_vsync_timestamp(struct drm_encoder *drm_enc)
 			return 0;
 	}
 
-	vsync_counter = cur_master->hw_intf->ops.get_vsync_timestamp(cur_master->hw_intf);
+	vsync_counter = cur_master->hw_intf->ops.get_vsync_timestamp(cur_master->hw_intf, is_vid);
 	qtmr_counter = arch_timer_read_counter();
 	cur_time = ktime_get_ns();
 
@@ -219,6 +226,31 @@ ktime_t sde_encoder_calc_last_vsync_timestamp(struct drm_encoder *drm_enc)
 	return tvblank;
 }
 
+static void _sde_encoder_control_fal10_veto(struct drm_encoder *drm_enc, bool veto)
+{
+	bool clone_mode;
+	struct sde_kms *sde_kms = sde_encoder_get_kms(drm_enc);
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!sde_kms || !sde_kms->hw_uidle || !sde_kms->hw_uidle->ops.uidle_fal10_override)
+		return;
+
+	/*
+	 * clone mode is the only scenario where we want to enable software override
+	 * of fal10 veto.
+	 */
+	clone_mode = sde_encoder_in_clone_mode(drm_enc);
+	SDE_EVT32(DRMID(drm_enc), clone_mode, veto);
+
+	if (clone_mode && veto) {
+		sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, veto);
+		sde_enc->fal10_veto_override = true;
+	} else if (sde_enc->fal10_veto_override && !veto) {
+		sde_kms->hw_uidle->ops.uidle_fal10_override(sde_kms->hw_uidle, veto);
+		sde_enc->fal10_veto_override = false;
+	}
+}
+
 static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
@@ -321,6 +353,7 @@ static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
 	s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
 	ktime_t cur_ktime;
 	ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
+	u32 curr_atomic_cnt = atomic_read(info->atomic_cnt);
 
 	do {
 		rc = wait_event_timeout(*(info->wq),
@@ -331,6 +364,14 @@ static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
 		SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
 			timeout_ms, atomic_read(info->atomic_cnt),
 			info->count_check);
+
+		/* Make an early exit if the condition is already satisfied */
+		if ((atomic_read(info->atomic_cnt) < info->count_check) &&
+				(info->count_check < curr_atomic_cnt)) {
+			rc = true;
+			break;
+		}
+
 	/* If we timed out, counter is valid and time is less, wait again */
 	} while ((atomic_read(info->atomic_cnt) != info->count_check) &&
 			(rc == 0) &&
@@ -450,12 +491,9 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
 			unsigned long flags;
 
 			SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
-				irq->hw_idx, irq->irq_idx,
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				atomic_read(wait_info->atomic_cnt));
-			SDE_DEBUG_PHYS(phys_enc,
-					"done but irq %d not triggered\n",
-					irq->irq_idx);
+				irq->hw_idx, irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+				atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_CASE1);
+			SDE_DEBUG_PHYS(phys_enc, "done but irq %d not triggered\n", irq->irq_idx);
 			local_irq_save(flags);
 			irq->cb.func(phys_enc, irq->irq_idx);
 			local_irq_restore(flags);
@@ -472,7 +510,7 @@ int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
 		ret = 0;
 		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
 			irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
-			atomic_read(wait_info->atomic_cnt));
+			atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_CASE2);
 	}
 
 	SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
@@ -1042,6 +1080,32 @@ static int _sde_encoder_atomic_check_reserve(struct drm_encoder *drm_enc,
 	return ret;
 }
 
+bool sde_encoder_is_line_insertion_supported(struct drm_encoder *drm_enc)
+{
+	struct sde_connector *sde_conn = NULL;
+	struct sde_kms *sde_kms = NULL;
+	struct drm_connector *conn = NULL;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid drm encoder\n");
+		return false;
+	}
+
+	sde_kms = sde_encoder_get_kms(drm_enc);
+	if (!sde_kms)
+		return false;
+
+	conn = sde_encoder_get_connector(sde_kms->dev, drm_enc);
+	if (!conn || !conn->state)
+		return false;
+
+	sde_conn = to_sde_connector(conn);
+	if (!sde_conn)
+		return false;
+
+	return sde_connector_is_line_insertion_supported(sde_conn);
+}
+
 static void _sde_encoder_get_qsync_fps_callback(struct drm_encoder *drm_enc,
 			u32 *qsync_fps, struct drm_connector_state *conn_state)
 {
@@ -1132,10 +1196,8 @@ static int _sde_encoder_atomic_check_qsync(struct sde_connector *sde_conn,
 	qsync_dirty = msm_property_is_dirty(&sde_conn->property_info,
 			&sde_conn_state->property_state, CONNECTOR_PROP_QSYNC_MODE);
 
-	if (has_modeset && qsync_dirty &&
-			(msm_is_mode_seamless_poms(&sde_conn_state->msm_mode) ||
-			 msm_is_mode_seamless_dms(&sde_conn_state->msm_mode) ||
-			 msm_is_mode_seamless_dyn_clk(&sde_conn_state->msm_mode))) {
+	if (has_modeset && qsync_dirty && (msm_is_mode_seamless_poms(&sde_conn_state->msm_mode) ||
+				msm_is_mode_seamless_dyn_clk(&sde_conn_state->msm_mode))) {
 		SDE_ERROR("invalid qsync update during modeset priv flag:%x\n",
 				sde_conn_state->msm_mode.private_flags);
 		return -EINVAL;
@@ -1481,6 +1543,61 @@ static int _sde_encoder_rsc_client_update_vsync_wait(
 	return ret;
 }
 
+static int _sde_encoder_rsc_state_trigger(struct drm_encoder *drm_enc, enum sde_rsc_state rsc_state)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct msm_display_info *disp_info;
+	struct sde_rsc_cmd_config *rsc_config;
+	struct drm_crtc *crtc;
+	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
+	int ret;
+
+	/**
+	 * Already checked drm_enc, sde_enc is valid in function
+	 * _sde_encoder_update_rsc_client() which pass the parameters
+	 * to this function.
+	 */
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	crtc = sde_enc->crtc;
+	disp_info = &sde_enc->disp_info;
+	rsc_config = &sde_enc->rsc_config;
+
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
+			&& (disp_info->display_type == SDE_CONNECTOR_PRIMARY)) {
+		/* update it only once */
+		sde_enc->rsc_state_init = true;
+
+		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+			rsc_state, rsc_config, crtc->base.id,
+			&wait_vblank_crtc_id);
+	} else {
+		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
+			rsc_state, NULL, crtc->base.id,
+			&wait_vblank_crtc_id);
+	}
+
+	/**
+	 * if RSC performed a state change that requires a VBLANK wait, it will
+	 * set wait_vblank_crtc_id to the CRTC whose VBLANK we must wait on.
+	 *
+	 * if we are the primary display, we will need to enable and wait
+	 * locally since we hold the commit thread
+	 *
+	 * if we are an external display, we must send a signal to the primary
+	 * to enable its VBLANK and wait one, since the RSC hardware is driven
+	 * by the primary panel's VBLANK signals
+	 */
+	SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id);
+	if (ret) {
+		SDE_ERROR_ENC(sde_enc, "sde rsc client update failed ret:%d\n", ret);
+	} else if (wait_vblank_crtc_id != SDE_RSC_INVALID_CRTC_ID) {
+		ret = _sde_encoder_rsc_client_update_vsync_wait(drm_enc,
+			sde_enc, wait_vblank_crtc_id);
+	}
+
+	return ret;
+}
+
 static int _sde_encoder_update_rsc_client(
 		struct drm_encoder *drm_enc, bool enable)
 {
@@ -1491,7 +1608,6 @@ static int _sde_encoder_update_rsc_client(
 	int ret;
 	struct msm_display_info *disp_info;
 	struct msm_mode_info *mode_info;
-	int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
 	u32 qsync_mode = 0, v_front_porch;
 	struct drm_display_mode *mode;
 	bool is_vid_mode;
@@ -1575,42 +1691,7 @@ static int _sde_encoder_update_rsc_client(
 	SDE_EVT32(DRMID(drm_enc), rsc_state, qsync_mode,
 				 rsc_config->fps, sde_enc->rsc_state_init);
 
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
-			&& (disp_info->display_type == SDE_CONNECTOR_PRIMARY)) {
-		/* update it only once */
-		sde_enc->rsc_state_init = true;
-
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, rsc_config, crtc->base.id,
-			&wait_vblank_crtc_id);
-	} else {
-		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
-			rsc_state, NULL, crtc->base.id,
-			&wait_vblank_crtc_id);
-	}
-
-	/**
-	 * if RSC performed a state change that requires a VBLANK wait, it will
-	 * set wait_vblank_crtc_id to the CRTC whose VBLANK we must wait on.
-	 *
-	 * if we are the primary display, we will need to enable and wait
-	 * locally since we hold the commit thread
-	 *
-	 * if we are an external display, we must send a signal to the primary
-	 * to enable its VBLANK and wait one, since the RSC hardware is driven
-	 * by the primary panel's VBLANK signals
-	 */
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id);
-	if (ret) {
-		SDE_ERROR_ENC(sde_enc,
-				"sde rsc client update failed ret:%d\n", ret);
-		return ret;
-	} else if (wait_vblank_crtc_id == SDE_RSC_INVALID_CRTC_ID) {
-		return ret;
-	}
-
-	ret = _sde_encoder_rsc_client_update_vsync_wait(drm_enc,
-			sde_enc, wait_vblank_crtc_id);
+	ret = _sde_encoder_rsc_state_trigger(drm_enc, rsc_state);
 
 	return ret;
 }
@@ -1708,7 +1789,7 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 
 	if (enable) {
 		/* enable SDE core clks */
-		rc = pm_runtime_get_sync(drm_enc->dev->dev);
+		rc = pm_runtime_resume_and_get(drm_enc->dev->dev);
 		if (rc < 0) {
 			SDE_ERROR("failed to enable power resource %d\n", rc);
 			SDE_EVT32(rc, SDE_EVTLOG_ERROR);
@@ -1849,7 +1930,7 @@ static void _sde_encoder_rc_restart_delayed(struct sde_encoder_virt *sde_enc,
 	else
 		lp = SDE_MODE_DPMS_ON;
 
-	if (lp == SDE_MODE_DPMS_LP2)
+	if ((lp == SDE_MODE_DPMS_LP1) || (lp == SDE_MODE_DPMS_LP2))
 		idle_pc_duration = IDLE_SHORT_TIMEOUT;
 	else
 		idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
@@ -2094,6 +2175,14 @@ static int _sde_encoder_rc_post_modeset(struct drm_encoder *drm_enc,
 		goto end;
 	}
 
+	/* toggle te bit to update vsync source for sim cmd mode panels */
+	if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_CMD_MODE)
+			&& sde_enc->disp_info.is_te_using_watchdog_timer) {
+		sde_encoder_control_te(drm_enc, false);
+		_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info);
+		sde_encoder_control_te(drm_enc, true);
+	}
+
 	_sde_encoder_update_rsc_client(drm_enc, true);
 
 	SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
@@ -2115,6 +2204,7 @@ static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
 	struct drm_crtc *crtc = drm_enc->crtc;
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct sde_connector *sde_conn;
+	int crtc_id = 0;
 
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
@@ -2140,10 +2230,14 @@ static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
 		goto end;
 	}
 
+	crtc_id = drm_crtc_index(crtc);
 	if (is_vid_mode) {
 		sde_encoder_irq_control(drm_enc, false);
 		_sde_encoder_pm_qos_remove_request(drm_enc);
 	} else {
+		if (priv->event_thread[crtc_id].thread)
+			kthread_flush_worker(&priv->event_thread[crtc_id].worker);
+
 		/* disable all the clks and resources */
 		_sde_encoder_update_rsc_client(drm_enc, false);
 		_sde_encoder_resource_control_helper(drm_enc, false);
@@ -2432,17 +2526,38 @@ static void _sde_encoder_virt_populate_hw_res(struct drm_encoder *drm_enc)
 }
 
 static int sde_encoder_virt_modeset_rc(struct drm_encoder *drm_enc,
-		struct msm_display_mode *msm_mode, bool pre_modeset)
+	struct drm_display_mode *adj_mode, struct msm_display_mode *msm_mode, bool pre_modeset)
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	enum sde_intf_mode intf_mode;
+	struct drm_display_mode *old_adj_mode = NULL;
 	int ret;
-	bool is_cmd_mode = false;
+	bool is_cmd_mode = false, res_switch = false;
 
 	if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
 		is_cmd_mode = true;
 
 	if (pre_modeset) {
+		if (sde_enc->cur_master)
+			old_adj_mode = &sde_enc->cur_master->cached_mode;
+		if (old_adj_mode && is_cmd_mode)
+			res_switch = !drm_mode_match(old_adj_mode, adj_mode,
+					DRM_MODE_MATCH_TIMINGS);
+
+		if ((res_switch && sde_enc->disp_info.is_te_using_watchdog_timer) ||
+			sde_encoder_is_cwb_disabling(drm_enc, drm_enc->crtc)) {
+			/*
+			 * add tx wait for sim panel to avoid wd timer getting
+			 * updated in middle of frame to avoid early vsync
+			 */
+			ret = sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+			if (ret && ret != -EWOULDBLOCK) {
+				SDE_ERROR_ENC(sde_enc, "wait for idle failed %d\n", ret);
+				SDE_EVT32(DRMID(drm_enc), ret, SDE_EVTLOG_ERROR);
+				return ret;
+			}
+		}
+
 		intf_mode = sde_encoder_get_intf_mode(drm_enc);
 		if (msm_is_mode_seamless_dms(msm_mode) ||
 				(msm_is_mode_seamless_dyn_clk(msm_mode) &&
@@ -2491,6 +2606,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	struct drm_connector *conn;
 	struct sde_connector_state *c_state;
 	struct msm_display_mode *msm_mode;
+	struct sde_crtc *sde_crtc;
 	int i = 0, ret;
 	int num_lm, num_intf, num_pp_per_intf;
 
@@ -2522,6 +2638,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	}
 
 	sde_enc->crtc = drm_enc->crtc;
+	sde_crtc = to_sde_crtc(drm_enc->crtc);
 	sde_crtc_set_qos_dirty(drm_enc->crtc);
 
 	/* get and store the mode_info */
@@ -2547,7 +2664,7 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
 	/* release resources before seamless mode change */
 	msm_mode = &c_state->msm_mode;
-	ret = sde_encoder_virt_modeset_rc(drm_enc, msm_mode, true);
+	ret = sde_encoder_virt_modeset_rc(drm_enc, adj_mode, msm_mode, true);
 	if (ret)
 		return;
 
@@ -2581,12 +2698,13 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 			phys->hw_pp = sde_enc->hw_pp[i * num_pp_per_intf];
 			phys->connector = conn;
 			if (phys->ops.mode_set)
-				phys->ops.mode_set(phys, mode, adj_mode);
+				phys->ops.mode_set(phys, mode, adj_mode,
+				&sde_crtc->reinit_crtc_mixers);
 		}
 	}
 
 	/* update resources after seamless mode change */
-	sde_encoder_virt_modeset_rc(drm_enc, msm_mode, false);
+	sde_encoder_virt_modeset_rc(drm_enc, adj_mode, msm_mode, false);
 }
 
 void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
@@ -2783,6 +2901,7 @@ static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
 
 	memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
 	memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
+	_sde_encoder_control_fal10_veto(drm_enc, true);
 }
 
 static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
@@ -2883,6 +3002,13 @@ void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
 
 	_sde_encoder_virt_enable_helper(drm_enc);
 	sde_encoder_control_te(drm_enc, true);
+
+	/*
+	 * During IPC misr ctl register is reset.
+	 * Need to reconfigure misr after every IPC.
+	 */
+	if (atomic_read(&sde_enc->misr_enable))
+		sde_enc->misr_reconfigure = true;
 }
 
 static void sde_encoder_populate_encoder_phys(struct drm_encoder *drm_enc,
@@ -2930,7 +3056,7 @@ static void sde_encoder_populate_encoder_phys(struct drm_encoder *drm_enc,
 				phys->ops.enable(phys);
 		}
 
-		if (sde_enc->misr_enable  && phys->ops.setup_misr &&
+		if (atomic_read(&sde_enc->misr_enable)  && phys->ops.setup_misr &&
 		(sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE)))
 			phys->ops.setup_misr(phys, true,
 						sde_enc->misr_frame_count);
@@ -3046,10 +3172,13 @@ void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
 	struct sde_kms *sde_kms = sde_encoder_get_kms(drm_enc);
 	int i = 0;
 
+	_sde_encoder_control_fal10_veto(drm_enc, false);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		if (sde_enc->phys_encs[i]) {
 			sde_enc->phys_encs[i]->cont_splash_enabled = false;
 			sde_enc->phys_encs[i]->connector = NULL;
+			sde_enc->phys_encs[i]->hw_ctl = NULL;
 		}
 		atomic_set(&sde_enc->frame_done_cnt[i], 0);
 	}
@@ -3070,6 +3199,7 @@ void sde_encoder_virt_reset(struct drm_encoder *drm_enc)
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc = NULL;
+	struct sde_connector *sde_conn;
 	struct sde_kms *sde_kms;
 	enum sde_intf_mode intf_mode;
 	int ret, i = 0;
@@ -3091,6 +3221,11 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master) {
+		SDE_ERROR("Invalid cur_master\n");
+		return;
+	}
+	sde_conn = to_sde_connector(sde_enc->cur_master->connector);
 	SDE_DEBUG_ENC(sde_enc, "\n");
 
 	sde_kms = sde_encoder_get_kms(&sde_enc->base);
@@ -3101,12 +3236,22 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	SDE_EVT32(DRMID(drm_enc));
 
-	/* wait for idle */
-	if (!sde_encoder_in_clone_mode(drm_enc))
+	if (!sde_encoder_in_clone_mode(drm_enc)) {
+		/* disable autorefresh */
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+			if (phys && phys->ops.disable_autorefresh)
+				phys->ops.disable_autorefresh(phys);
+		}
+
+		/* wait for idle */
 		sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+	}
 
 	_sde_encoder_input_handler_unregister(drm_enc);
 
+	flush_delayed_work(&sde_conn->status_work);
 	/*
 	 * For primary command mode and video mode encoders, execute the
 	 * resource control pre-stop operations before the physical encoders
@@ -3162,6 +3307,13 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 		sde_encoder_virt_reset(drm_enc);
 }
 
+static void _trigger_encoder_hw_fences_override(struct sde_kms *sde_kms, struct sde_hw_ctl *ctl)
+{
+	/* trigger hw-fences override signal */
+	if (sde_kms && sde_kms->catalog->hw_fence_rev && ctl->ops.hw_fence_trigger_sw_override)
+		ctl->ops.hw_fence_trigger_sw_override(ctl);
+}
+
 void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 		struct sde_encoder_phys_wb *wb_enc)
 {
@@ -3220,7 +3372,7 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 	if (phys_enc->hw_dnsc_blur && phys_enc->hw_dnsc_blur->ops.bind_pingpong_blk &&
 			phys_enc->hw_pp) {
 		phys_enc->hw_dnsc_blur->ops.bind_pingpong_blk(phys_enc->hw_dnsc_blur,
-				false, phys_enc->hw_pp->idx);
+				false, phys_enc->hw_pp->idx, phys_enc->in_clone_mode);
 
 		if (ctl->ops.update_dnsc_blur_bitmask)
 			ctl->ops.update_dnsc_blur_bitmask(ctl, phys_enc->hw_dnsc_blur->idx, true);
@@ -3243,6 +3395,8 @@ void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
 		}
 	}
 
+	_trigger_encoder_hw_fences_override(phys_enc->sde_kms, ctl);
+
 	sde_crtc_disable_cp_features(sde_enc->base.crtc);
 	ctl->ops.get_pending_flush(ctl, &cfg);
 	SDE_EVT32(DRMID(phys_enc->parent), cfg.pending_flush_mask);
@@ -3288,6 +3442,35 @@ static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
 	return WB_MAX;
 }
 
+void sde_encoder_hw_fence_status(struct sde_kms *sde_kms,
+	struct drm_crtc *crtc, struct sde_hw_ctl *hw_ctl)
+{
+	u64 start_timestamp, end_timestamp;
+
+	if (!sde_kms || !hw_ctl || !sde_kms->hw_mdp) {
+		SDE_ERROR("invalid inputs\n");
+		return;
+	}
+
+	if ((sde_kms->debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
+		&& sde_kms->hw_mdp->ops.hw_fence_input_status) {
+
+		sde_kms->hw_mdp->ops.hw_fence_input_status(sde_kms->hw_mdp,
+			&start_timestamp, &end_timestamp);
+		trace_sde_hw_fence_status(crtc->base.id, "input",
+			start_timestamp, end_timestamp);
+	}
+
+	if ((sde_kms->debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP)
+		&& hw_ctl->ops.hw_fence_output_status) {
+
+		hw_ctl->ops.hw_fence_output_status(hw_ctl,
+			&start_timestamp, &end_timestamp);
+		trace_sde_hw_fence_status(crtc->base.id, "output",
+			start_timestamp, end_timestamp);
+	}
+}
+
 void sde_encoder_perf_uidle_status(struct sde_kms *sde_kms,
 	struct drm_crtc *crtc)
 {
@@ -3347,7 +3530,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
 	unsigned long lock_flags;
 	ktime_t ts = 0;
 
-	if (!drm_enc || !phy_enc)
+	if (!drm_enc || !phy_enc || !phy_enc->sde_kms)
 		return;
 
 	SDE_ATRACE_BEGIN("encoder_vblank_callback");
@@ -3357,8 +3540,7 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
 	 * calculate accurate vsync timestamp when available
 	 * set current time otherwise
 	 */
-	if (phy_enc->sde_kms && test_bit(SDE_FEATURE_HW_VSYNC_TS,
-					 phy_enc->sde_kms->catalog->features))
+	if (test_bit(SDE_FEATURE_HW_VSYNC_TS, phy_enc->sde_kms->catalog->features))
 		ts = sde_encoder_calc_last_vsync_timestamp(drm_enc);
 	if (!ts)
 		ts = ktime_get();
@@ -3370,10 +3552,13 @@ static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
 		sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data, ts);
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 
-	if (phy_enc->sde_kms &&
-			phy_enc->sde_kms->catalog->uidle_cfg.debugfs_perf)
+	if (phy_enc->sde_kms->catalog->uidle_cfg.debugfs_perf)
 		sde_encoder_perf_uidle_status(phy_enc->sde_kms, sde_enc->crtc);
 
+	if (phy_enc->sde_kms->debugfs_hw_fence)
+		sde_encoder_hw_fence_status(phy_enc->sde_kms, sde_enc->crtc, phy_enc->hw_ctl);
+
+	SDE_EVT32(DRMID(drm_enc), ktime_to_us(ts), atomic_read(&phy_enc->vsync_cnt));
 	SDE_ATRACE_END("encoder_vblank_callback");
 }
 
@@ -3556,6 +3741,30 @@ int sde_encoder_idle_request(struct drm_encoder *drm_enc)
 	return 0;
 }
 
+/**
+* _sde_encoder_update_retire_txq - update tx queue for a retire hw fence
+* phys: Pointer to physical encoder structure
+*
+*/
+static inline void _sde_encoder_update_retire_txq(struct sde_encoder_phys *phys,
+	struct sde_kms *sde_kms)
+{
+	struct sde_connector *c_conn;
+	int line_count;
+
+	c_conn = to_sde_connector(phys->connector);
+	if (!c_conn) {
+		SDE_ERROR("invalid connector");
+		return;
+	}
+
+	line_count = sde_connector_get_property(phys->connector->state,
+			CONNECTOR_PROP_EARLY_FENCE_LINE);
+	if (c_conn->hwfence_wb_retire_fences_enable)
+		sde_fence_update_hw_fences_txq(c_conn->retire_fence, false, line_count,
+			sde_kms->debugfs_hw_fence);
+}
+
 /**
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
@@ -3763,6 +3972,28 @@ void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
 	phys_enc->enable_state = SDE_ENC_ENABLED;
 }
 
+void sde_encoder_helper_update_out_fence_txq(struct sde_encoder_virt *sde_enc, bool is_vid)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_kms *sde_kms = NULL;
+
+	if (!sde_enc || !sde_enc->crtc) {
+		SDE_ERROR("invalid encoder %d\n", !sde_enc);
+		return;
+	}
+	sde_kms = sde_encoder_get_kms(&sde_enc->base);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(sde_enc->crtc);
+
+	SDE_EVT32(DRMID(sde_enc->crtc), is_vid);
+	sde_fence_update_hw_fences_txq(sde_crtc->output_fence, is_vid, 0, sde_kms ?
+		sde_kms->debugfs_hw_fence : 0);
+}
+
 /**
  * _sde_encoder_kickoff_phys - handle physical encoder kickoff
  *	Iterate through the physical encoders and perform consolidated flush
@@ -3852,12 +4083,11 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc,
 			pending_kickoff_cnt =
 					sde_encoder_phys_inc_pending(phys);
 			SDE_EVT32(pending_kickoff_cnt,
-					pending_flush.pending_flush_mask,
-					SDE_EVTLOG_FUNC_CASE2);
+					pending_flush.pending_flush_mask, SDE_EVTLOG_FUNC_CASE2);
 		}
 	}
 
-	if (sde_enc->misr_enable)
+	if (atomic_read(&sde_enc->misr_enable))
 		sde_encoder_misr_configure(&sde_enc->base, true,
 				sde_enc->misr_frame_count);
 
@@ -4084,6 +4314,9 @@ static void sde_encoder_early_wakeup_work_handler(struct kthread_work *work)
 			struct sde_encoder_virt, early_wakeup_work);
 	struct sde_kms *sde_kms = to_sde_kms(ddev_to_msm_kms(sde_enc->base.dev));
 
+	if (!sde_kms)
+		return;
+
 	sde_vm_lock(sde_kms);
 	if (!sde_vm_owns_hw(sde_kms)) {
 		sde_vm_unlock(sde_kms);
@@ -4281,6 +4514,50 @@ void sde_encoder_needs_hw_reset(struct drm_encoder *drm_enc)
 	}
 }
 
+static int _sde_encoder_prepare_for_kickoff_processing(struct drm_encoder *drm_enc,
+		struct sde_encoder_kickoff_params *params,
+		struct sde_encoder_virt *sde_enc,
+		struct sde_kms *sde_kms,
+		bool needs_hw_reset, bool is_cmd_mode)
+{
+	int rc, ret = 0;
+
+	/* if any phys needs reset, reset all phys, in-order */
+	if (needs_hw_reset)
+		sde_encoder_needs_hw_reset(drm_enc);
+
+	_sde_encoder_update_master(drm_enc, params);
+
+	_sde_encoder_update_roi(drm_enc);
+
+	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
+		if (rc) {
+			SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
+					sde_enc->cur_master->connector->base.id, rc);
+			ret = rc;
+		}
+	}
+
+	if (sde_enc->cur_master &&
+			((is_cmd_mode && sde_enc->cur_master->cont_splash_enabled) ||
+			!sde_enc->cur_master->cont_splash_enabled)) {
+		rc = sde_encoder_dce_setup(sde_enc, params);
+		if (rc) {
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
+			ret = rc;
+		}
+	}
+
+	sde_encoder_dce_flush(sde_enc);
+
+	if (sde_enc->cur_master && !sde_enc->cur_master->cont_splash_enabled)
+		sde_configure_qdss(sde_enc, sde_enc->cur_master->hw_qdss,
+				sde_enc->cur_master, sde_kms->qdss_enabled);
+
+	return ret;
+}
+
 int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -4354,39 +4631,8 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		goto end;
 	}
 
-	/* if any phys needs reset, reset all phys, in-order */
-	if (needs_hw_reset)
-		sde_encoder_needs_hw_reset(drm_enc);
-
-	_sde_encoder_update_master(drm_enc, params);
-
-	_sde_encoder_update_roi(drm_enc);
-
-	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
-		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
-		if (rc) {
-			SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
-					sde_enc->cur_master->connector->base.id,
-					rc);
-			ret = rc;
-		}
-	}
-
-	if (sde_enc->cur_master &&
-		((is_cmd_mode && sde_enc->cur_master->cont_splash_enabled) ||
-			!sde_enc->cur_master->cont_splash_enabled)) {
-		rc = sde_encoder_dce_setup(sde_enc, params);
-		if (rc) {
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
-			ret = rc;
-		}
-	}
-
-	sde_encoder_dce_flush(sde_enc);
-
-	if (sde_enc->cur_master && !sde_enc->cur_master->cont_splash_enabled)
-		sde_configure_qdss(sde_enc, sde_enc->cur_master->hw_qdss,
-				sde_enc->cur_master, sde_kms->qdss_enabled);
+	ret = _sde_encoder_prepare_for_kickoff_processing(drm_enc, params, sde_enc, sde_kms,
+			needs_hw_reset, is_cmd_mode);
 
 end:
 	SDE_ATRACE_END("sde_encoder_prepare_for_kickoff");
@@ -4397,6 +4643,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool config_changed)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
+	struct sde_kms *sde_kms;
 	unsigned int i;
 
 	if (!drm_enc) {
@@ -4421,6 +4668,15 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool config_changed)
 		SDE_EVT32(DRMID(drm_enc), i, SDE_EVTLOG_FUNC_CASE1);
 	}
 
+	/* update txq for any output retire hw-fence (wb-path) */
+	sde_kms = sde_encoder_get_kms(&sde_enc->base);
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	}
+	if (sde_enc->cur_master)
+		_sde_encoder_update_retire_txq(sde_enc->cur_master, sde_kms);
+
 	/* All phys encs are ready to go, trigger the kickoff */
 	_sde_encoder_kickoff_phys(sde_enc, config_changed);
 
@@ -4486,6 +4742,7 @@ u32 sde_encoder_helper_get_kickoff_timeout_ms(struct drm_encoder *drm_enc)
 {
 	struct drm_encoder *src_enc = drm_enc;
 	struct sde_encoder_virt *sde_enc;
+	struct sde_kms *sde_kms;
 	u32 fps;
 
 	if (!drm_enc) {
@@ -4493,12 +4750,19 @@ u32 sde_encoder_helper_get_kickoff_timeout_ms(struct drm_encoder *drm_enc)
 		return DEFAULT_KICKOFF_TIMEOUT_MS;
 	}
 
+	sde_kms = sde_encoder_get_kms(drm_enc);
+	if (!sde_kms)
+		return DEFAULT_KICKOFF_TIMEOUT_MS;
+
 	if (sde_encoder_in_clone_mode(drm_enc))
 		src_enc = sde_crtc_get_src_encoder_of_clone(drm_enc->crtc);
 
 	if (!src_enc)
 		return DEFAULT_KICKOFF_TIMEOUT_MS;
 
+	if (test_bit(SDE_FEATURE_EMULATED_ENV, sde_kms->catalog->features))
+		return MAX_KICKOFF_TIMEOUT_MS;
+
 	sde_enc = to_sde_encoder_virt(src_enc);
 	fps = sde_enc->mode_info.frame_rate;
 
@@ -4663,7 +4927,7 @@ int sde_encoder_helper_collect_misr(struct sde_encoder_phys *phys_enc,
 			nonblock, misr_value) : -ENOTSUPP;
 }
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 static int _sde_encoder_status_show(struct seq_file *s, void *data)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -4752,7 +5016,7 @@ static ssize_t _sde_encoder_misr_setup(struct file *file,
 	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
 		return -EINVAL;
 
-	sde_enc->misr_enable = enable;
+	atomic_set(&sde_enc->misr_enable, enable);
 	sde_enc->misr_reconfigure = true;
 	sde_enc->misr_frame_count = frame_count;
 	return count;
@@ -4785,9 +5049,12 @@ static ssize_t _sde_encoder_misr_read(struct file *file,
 	}
 	drm_enc = &sde_enc->base;
 
-	rc = pm_runtime_get_sync(drm_enc->dev->dev);
-	if (rc < 0)
+	rc = pm_runtime_resume_and_get(drm_enc->dev->dev);
+	if (rc < 0) {
+		SDE_ERROR("failed to enable power resource %d\n", rc);
+		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
 		return rc;
+	}
 
 	sde_vm_lock(sde_kms);
 	if (!sde_vm_owns_hw(sde_kms)) {
@@ -4796,7 +5063,7 @@ static ssize_t _sde_encoder_misr_read(struct file *file,
 		goto end;
 	}
 
-	if (!sde_enc->misr_enable) {
+	if (!atomic_read(&sde_enc->misr_enable)) {
 		len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
 				"disabled\n");
 		goto buff_check;
@@ -4931,7 +5198,7 @@ static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
 static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
 {
 }
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 static int sde_encoder_late_register(struct drm_encoder *encoder)
 {
@@ -5248,6 +5515,13 @@ struct drm_encoder *sde_encoder_init(struct drm_device *dev, struct msm_display_
 			"input handler registration failed, rc = %d\n", ret);
 	}
 
+	/* Keep posted start as default configuration in driver
+	   if SBLUT is supported on target. Do not allow HAL to
+	   override driver's default frame trigger mode.
+	*/
+	if(sde_kms->catalog->dma_cfg.reg_dma_blks[REG_DMA_TYPE_SB].valid)
+		sde_enc->frame_trigger_mode = FRAME_DONE_WAIT_POSTED_START;
+
 	mutex_init(&sde_enc->rc_lock);
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
@@ -5320,8 +5594,13 @@ int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
 			SDE_ATRACE_BEGIN(atrace_buf);
 			ret = fn_wait(phys);
 			SDE_ATRACE_END(atrace_buf);
-			if (ret)
+			if (ret) {
+				SDE_ERROR_ENC(sde_enc, "intf_type:%d, event:%d i:%d, failed:%d\n",
+						sde_enc->disp_info.intf_type, event, i, ret);
+				SDE_EVT32(DRMID(drm_enc), sde_enc->disp_info.intf_type, event,
+						i, ret, SDE_EVTLOG_ERROR);
 				return ret;
+			}
 		}
 	}
 
@@ -5746,6 +6025,33 @@ bool sde_encoder_needs_dsc_disable(struct drm_encoder *drm_enc)
 	return TOPOLOGY_DSC_MODE(conn_state->old_topology_name);
 }
 
+struct sde_hw_ctl *sde_encoder_get_hw_ctl(struct sde_connector *c_conn)
+{
+	struct drm_encoder *drm_enc;
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *cur_master;
+	struct sde_hw_ctl *hw_ctl = NULL;
+
+	if (!c_conn || !c_conn->hwfence_wb_retire_fences_enable)
+		goto exit;
+
+	/* get encoder to find the hw_ctl for this connector */
+	drm_enc = c_conn->encoder;
+	if (!drm_enc)
+		goto exit;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	cur_master = sde_enc->phys_encs[0];
+	if (!cur_master || !cur_master->hw_ctl)
+		goto exit;
+
+	hw_ctl = cur_master->hw_ctl;
+	SDE_DEBUG("conn hw_ctl idx:%d intf_mode:%d\n", hw_ctl->idx, cur_master->intf_mode);
+
+exit:
+	return hw_ctl;
+}
+
 void sde_encoder_add_data_to_minidump_va(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -5768,3 +6074,80 @@ void sde_encoder_add_data_to_minidump_va(struct drm_encoder *drm_enc)
 			phys_enc->ops.add_to_minidump(phys_enc);
 	}
 }
+
+void sde_encoder_misr_sign_event_notify(struct drm_encoder *drm_enc)
+{
+	struct drm_event event;
+	struct drm_connector *connector;
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *c_state = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct sde_encoder_phys *phys = NULL;
+	u32 current_misr_value[MAX_DSI_DISPLAYS] = {0};
+	int rc = 0, i = 0;
+	bool misr_updated = false, roi_updated = false;
+	struct msm_roi_list *prev_roi, *c_state_roi;
+
+	if (!drm_enc)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!atomic_read(&sde_enc->misr_enable)) {
+		SDE_DEBUG("MISR is disabled\n");
+		return;
+	}
+
+	connector = sde_enc->cur_master->connector;
+	if (!connector)
+		return;
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(connector->state);
+
+	atomic64_set(&c_conn->previous_misr_sign.num_valid_misr, 0);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+
+		if (!phys || !phys->ops.collect_misr) {
+			SDE_DEBUG("invalid misr ops\n", i);
+			continue;
+		}
+
+		rc = phys->ops.collect_misr(phys, true, &current_misr_value[i]);
+		if (rc) {
+			SDE_ERROR("failed to collect misr %d\n", rc);
+			return;
+		}
+
+		atomic64_inc(&c_conn->previous_misr_sign.num_valid_misr);
+	}
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		if (current_misr_value[i] != c_conn->previous_misr_sign.misr_sign_value[i]) {
+			c_conn->previous_misr_sign.misr_sign_value[i] = current_misr_value[i];
+			misr_updated = true;
+		}
+	}
+
+	prev_roi = &c_conn->previous_misr_sign.roi_list;
+	c_state_roi = &c_state->rois;
+
+	if (prev_roi->num_rects != c_state_roi->num_rects) {
+		roi_updated = true;
+	} else {
+		for (i = 0; i < prev_roi->num_rects; i++) {
+			if (IS_ROI_UPDATED(prev_roi->roi[i], c_state_roi->roi[i]))
+				roi_updated = true;
+		}
+	}
+
+	if (roi_updated)
+		memcpy(&c_conn->previous_misr_sign.roi_list, &c_state->rois, sizeof(c_state->rois));
+
+	if (misr_updated || roi_updated) {
+		event.type = DRM_EVENT_MISR_SIGN;
+		event.length = sizeof(c_conn->previous_misr_sign);
+		msm_mode_object_event_notify(&connector->base, connector->dev, &event,
+						(u8 *)&c_conn->previous_misr_sign);
+	}
+}

+ 60 - 2
msm/sde/sde_encoder.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <[email protected]>
@@ -170,6 +170,7 @@ enum sde_enc_rc_states {
  * @cur_conn_roi:		current connector roi
  * @prv_conn_roi:		previous connector roi to optimize if unchanged
  * @crtc			pointer to drm_crtc
+ * @fal10_veto_override:	software override for micro idle fal10 veto
  * @recovery_events_enabled:	status of hw recovery feature enable by client
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
@@ -221,7 +222,7 @@ struct sde_encoder_virt {
 	struct sde_rsc_client *rsc_client;
 	bool rsc_state_init;
 	struct msm_display_info disp_info;
-	bool misr_enable;
+	atomic_t misr_enable;
 	bool misr_reconfigure;
 	u32 misr_frame_count;
 
@@ -244,6 +245,7 @@ struct sde_encoder_virt {
 	struct sde_rect prv_conn_roi;
 	struct drm_crtc *crtc;
 
+	bool fal10_veto_override;
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
 	struct dev_pm_qos_request pm_qos_cpu_req[NR_CPUS];
@@ -605,6 +607,13 @@ bool sde_encoder_needs_dsc_disable(struct drm_encoder *drm_enc);
 void sde_encoder_get_transfer_time(struct drm_encoder *drm_enc,
 		u32 *transfer_time_us);
 
+/**
+ * sde_encoder_helper_update_out_fence_txq - updates hw-fence tx queue
+ * @sde_enc: Pointer to sde encoder structure
+ * @is_vid: Boolean to indicate if is video-mode
+ */
+void sde_encoder_helper_update_out_fence_txq(struct sde_encoder_virt *sde_enc, bool is_vid);
+
 /*
  * sde_encoder_get_dfps_maxfps - get dynamic FPS max frame rate of
 				the given encoder
@@ -681,5 +690,54 @@ static inline bool sde_encoder_is_widebus_enabled(struct drm_encoder *drm_enc)
 	return sde_enc->mode_info.wide_bus_en;
 }
 
+/*
+ * sde_encoder_is_line_insertion_supported - get line insertion
+ * feature bit value from panel
+ * @drm_enc:    Pointer to drm encoder structure
+ * @Return: line insertion support status
+ */
+bool sde_encoder_is_line_insertion_supported(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_get_hw_ctl - gets hw ctl from the connector
+ * @c_conn: sde connector
+ * @Return: pointer to the hw ctl from the encoder upon success, otherwise null
+ */
+struct sde_hw_ctl *sde_encoder_get_hw_ctl(struct sde_connector *c_conn);
+
 void sde_encoder_add_data_to_minidump_va(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_misr_sign_event_notify - collect MISR, check with previous value
+ * if change then notify to client with custom event
+ * @drm_enc: pointer to drm encoder
+ */
+void sde_encoder_misr_sign_event_notify(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_register_misr_event - register or deregister MISR event
+ * @drm_enc: pointer to drm encoder
+ * @val: indicates register or deregister
+ */
+static inline int sde_encoder_register_misr_event(struct drm_encoder *drm_enc, bool val)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+
+	if (!drm_enc)
+		return -EINVAL;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	atomic_set(&sde_enc->misr_enable, val);
+
+	/*
+	 * To setup MISR ctl reg, set misr_reconfigure as true.
+	 * MISR is calculated for the specific number of frames.
+	 */
+	if (atomic_read(&sde_enc->misr_enable)) {
+		sde_enc->misr_reconfigure = true;
+		sde_enc->misr_frame_count = 1;
+	}
+
+	return 0;
+}
 #endif /* __SDE_ENCODER_H__ */

+ 2 - 0
msm/sde/sde_encoder_dce.c

@@ -1,8 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  */
 
+#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/kthread.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>

+ 12 - 3
msm/sde/sde_encoder_phys.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -26,6 +26,9 @@
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
 #define DEFAULT_KICKOFF_TIMEOUT_MS		84
 
+/* wait 1 sec for the emulated targets */
+#define MAX_KICKOFF_TIMEOUT_MS                  100000
+
 #define MAX_TE_PROFILE_COUNT		5
 /**
  * enum sde_enc_split_role - Role this physical encoder will play in a
@@ -136,6 +139,7 @@ struct sde_encoder_virt_ops {
  * @get_underrun_line_count:	Obtain and log current internal vertical line
  *                              count and underrun line count
  * @add_to_minidump:		Add this phys_enc data to minidumps
+ * @disable_autorefresh:	Disable autorefresh
  */
 
 struct sde_encoder_phys_ops {
@@ -148,7 +152,7 @@ struct sde_encoder_phys_ops {
 			struct drm_display_mode *adjusted_mode);
 	void (*mode_set)(struct sde_encoder_phys *encoder,
 			struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode);
+			struct drm_display_mode *adjusted_mode, bool *reinit_mixers);
 	void (*cont_splash_mode_set)(struct sde_encoder_phys *encoder,
 			struct drm_display_mode *adjusted_mode);
 	void (*enable)(struct sde_encoder_phys *encoder);
@@ -189,6 +193,7 @@ struct sde_encoder_phys_ops {
 			struct msm_display_info *disp_info);
 	u32 (*get_underrun_line_count)(struct sde_encoder_phys *phys);
 	void (*add_to_minidump)(struct sde_encoder_phys *phys);
+	void (*disable_autorefresh)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -309,6 +314,7 @@ struct sde_encoder_irq {
  * @frame_trigger_mode:		frame trigger mode indication for command
  *				mode display
  * @recovered:			flag set to true when recovered from pp timeout
+ * @autorefresh_disable_trans:   flag set to true during autorefresh disable transition
  */
 struct sde_encoder_phys {
 	struct drm_encoder *parent;
@@ -356,6 +362,7 @@ struct sde_encoder_phys {
 	int vfp_cached;
 	enum frame_trigger_mode_type frame_trigger_mode;
 	bool recovered;
+	bool autorefresh_disable_trans;
 };
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
@@ -411,6 +418,7 @@ struct sde_encoder_phys_cmd_te_timestamp {
  * @wr_ptr_wait_success: log wr_ptr_wait success for release fence trigger
  * @te_timestamp_list: List head for the TE timestamp list
  * @te_timestamp: Array of size MAX_TE_PROFILE_COUNT te_timestamp_list elements
+ * @qsync_threshold_lines: tearcheck threshold lines calculated based on qsync_min_fps
  */
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
@@ -423,6 +431,7 @@ struct sde_encoder_phys_cmd {
 	struct list_head te_timestamp_list;
 	struct sde_encoder_phys_cmd_te_timestamp
 			te_timestamp[MAX_TE_PROFILE_COUNT];
+	u32 qsync_threshold_lines;
 };
 
 /**
@@ -444,7 +453,7 @@ struct sde_encoder_phys_cmd {
  * @bo_disable:		Buffer object(s) to use during the disabling state
  * @fb_disable:		Frame buffer to use during the disabling state
  * @sc_cfg:		Stores wb system cache config
- * @crtc		Pointer to drm_crtc
+ * @crtc:		Pointer to drm_crtc
  * @prog_line:		Cached programmable line value used to trigger early wb-fence
  */
 struct sde_encoder_phys_wb {

+ 134 - 22
msm/sde/sde_encoder_phys_cmd.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -171,6 +171,37 @@ static void _sde_encoder_phys_cmd_update_intf_cfg(
 	}
 }
 
+static void sde_encoder_override_tearcheck_rd_ptr(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_intf *hw_intf;
+	struct drm_display_mode *mode;
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
+	u32 adjusted_tear_rd_ptr_line_cnt;
+
+	if (!phys_enc || !phys_enc->hw_intf)
+		return;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	hw_intf = phys_enc->hw_intf;
+	mode = &phys_enc->cached_mode;
+
+	/* Configure TE rd_ptr_val to the end of qsync Start Window.
+	 * This ensures next frame trigger_start does not get latched in the current
+	 * vsync window.
+	 */
+	adjusted_tear_rd_ptr_line_cnt = mode->vdisplay + cmd_enc->qsync_threshold_lines + 1;
+
+	if (hw_intf && hw_intf->ops.override_tear_rd_ptr_val)
+		hw_intf->ops.override_tear_rd_ptr_val(hw_intf, adjusted_tear_rd_ptr_line_cnt);
+
+	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
+	SDE_EVT32_VERBOSE(phys_enc->hw_intf->idx - INTF_0, mode->vdisplay,
+		cmd_enc->qsync_threshold_lines, info[0].rd_ptr_line_count,
+		info[0].rd_ptr_frame_count, info[0].wr_ptr_line_count,
+		info[1].rd_ptr_line_count, info[1].rd_ptr_frame_count, info[1].wr_ptr_line_count);
+}
+
 static void _sde_encoder_phys_signal_frame_done(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc;
@@ -180,6 +211,9 @@ static void _sde_encoder_phys_signal_frame_done(struct sde_encoder_phys *phys_en
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 	ctl = phys_enc->hw_ctl;
 
+	if (!ctl)
+		return;
+
 	/* notify all synchronous clients first, then asynchronous clients */
 	if (phys_enc->parent_ops.handle_frame_done &&
 		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0)) {
@@ -193,11 +227,23 @@ static void _sde_encoder_phys_signal_frame_done(struct sde_encoder_phys *phys_en
 		spin_unlock(phys_enc->enc_spinlock);
 	}
 
-	if (ctl && ctl->ops.get_scheduler_status)
+	if (ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);
 
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0,
-		phys_enc->hw_pp->idx - PINGPONG_0, event, scheduler_status);
+		phys_enc->hw_pp->idx - PINGPONG_0, event, scheduler_status,
+		phys_enc->autorefresh_disable_trans);
+
+	/*
+	 * For hw-fences, in the last frame during the autorefresh disable transition
+	 * hw won't trigger the output-fence signal once the frame is done, therefore
+	 * sw must trigger the override to force the signal here
+	 */
+	if (phys_enc->autorefresh_disable_trans) {
+		if (ctl->ops.trigger_output_fence_override)
+			ctl->ops.trigger_output_fence_override(ctl);
+		phys_enc->autorefresh_disable_trans = false;
+	}
 
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
@@ -247,10 +293,11 @@ static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx)
 	new_cnt = atomic_add_unless(&cmd_enc->autorefresh.kickoff_cnt, -1, 0);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			phys_enc->hw_intf->idx - INTF_0,
-			new_cnt);
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			phys_enc->hw_intf->idx - INTF_0, new_cnt);
+
+	if (new_cnt)
+		_sde_encoder_phys_signal_frame_done(phys_enc);
 
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&cmd_enc->autorefresh.kickoff_wq);
@@ -265,15 +312,16 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 	struct sde_encoder_phys_cmd_te_timestamp *te_timestamp;
 	unsigned long lock_flags;
+	u32 fence_ready = 0;
 
-	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf)
+	if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_intf || !phys_enc->hw_ctl)
 		return;
 
 	SDE_ATRACE_BEGIN("rd_ptr_irq");
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 	ctl = phys_enc->hw_ctl;
 
-	if (ctl && ctl->ops.get_scheduler_status)
+	if (ctl->ops.get_scheduler_status)
 		scheduler_status = ctl->ops.get_scheduler_status(ctl);
 
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
@@ -286,13 +334,16 @@ static void sde_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
 	}
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
+	if ((scheduler_status != 0x1) && ctl->ops.get_hw_fence_status)
+		fence_ready = ctl->ops.get_hw_fence_status(ctl);
+
 	sde_encoder_helper_get_pp_line_count(phys_enc->parent, info);
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		info[0].pp_idx, info[0].intf_idx,
-		info[0].wr_ptr_line_count, info[0].intf_frame_count,
+		info[0].wr_ptr_line_count, info[0].intf_frame_count, info[0].rd_ptr_line_count,
 		info[1].pp_idx, info[1].intf_idx,
-		info[1].wr_ptr_line_count, info[1].intf_frame_count,
-		scheduler_status);
+		info[1].wr_ptr_line_count, info[1].intf_frame_count, info[1].rd_ptr_line_count,
+		scheduler_status, fence_ready);
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
@@ -307,7 +358,7 @@ static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 {
 	struct sde_encoder_phys *phys_enc = arg;
 	struct sde_hw_ctl *ctl;
-	u32 event = 0;
+	u32 event = 0, qsync_mode = 0;
 	struct sde_hw_pp_vsync_info info[MAX_CHANNELS_PER_ENC] = {{0}};
 
 	if (!phys_enc || !phys_enc->hw_ctl)
@@ -315,6 +366,7 @@ static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 
 	SDE_ATRACE_BEGIN("wr_ptr_irq");
 	ctl = phys_enc->hw_ctl;
+	qsync_mode = sde_connector_get_qsync_mode(phys_enc->connector);
 
 	if (atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)) {
 		event = SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
@@ -330,7 +382,10 @@ static void sde_encoder_phys_cmd_wr_ptr_irq(void *arg, int irq_idx)
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
 		ctl->idx - CTL_0, event,
 		info[0].pp_idx, info[0].intf_idx, info[0].wr_ptr_line_count,
-		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count);
+		info[1].pp_idx, info[1].intf_idx, info[1].wr_ptr_line_count, qsync_mode);
+
+	if (qsync_mode)
+		sde_encoder_override_tearcheck_rd_ptr(phys_enc);
 
 	/* Signal any waiting wr_ptr start interrupt */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
@@ -431,7 +486,7 @@ static void sde_encoder_phys_cmd_cont_splash_mode_set(
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 		to_sde_encoder_phys_cmd(phys_enc);
@@ -452,8 +507,14 @@ static void sde_encoder_phys_cmd_mode_set(
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
+		if (sde_rm_get_hw(rm, &iter)) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers = true;
+				SDE_EVT32(phys_enc->hw_ctl->idx,
+						to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
@@ -734,6 +795,10 @@ static int _sde_encoder_phys_cmd_wait_for_idle(
 		return -EINVAL;
 	}
 
+	if (sde_encoder_check_ctl_done_support(phys_enc->parent)
+			&& !sde_encoder_phys_cmd_is_master(phys_enc))
+		return 0;
+
 	if (atomic_read(&phys_enc->pending_kickoff_cnt) > 1)
 		wait_info.count_check = 1;
 
@@ -1062,6 +1127,7 @@ static void sde_encoder_phys_cmd_tearcheck_config(
 	tc_cfg.start_pos = mode->vdisplay;
 	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
 	tc_cfg.wr_ptr_irq = 1;
+	cmd_enc->qsync_threshold_lines = tc_cfg.sync_threshold_start;
 
 	SDE_DEBUG_CMDENC(cmd_enc,
 	  "tc %d intf %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
@@ -1388,6 +1454,7 @@ static int sde_encoder_phys_cmd_prepare_for_kickoff(
 	if (sde_connector_is_qsync_updated(phys_enc->connector)) {
 		tc_cfg.sync_threshold_start = _get_tearcheck_threshold(
 				phys_enc);
+		cmd_enc->qsync_threshold_lines = tc_cfg.sync_threshold_start;
 		if (phys_enc->has_intf_te &&
 				phys_enc->hw_intf->ops.update_tearcheck)
 			phys_enc->hw_intf->ops.update_tearcheck(
@@ -1532,6 +1599,10 @@ static int sde_encoder_phys_cmd_wait_for_tx_complete(
 
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
+	if (sde_encoder_check_ctl_done_support(phys_enc->parent)
+			&& !sde_encoder_phys_cmd_is_master(phys_enc))
+		return 0;
+
 	if (!atomic_read(&phys_enc->pending_kickoff_cnt)) {
 		SDE_EVT32(DRMID(phys_enc->parent),
 			phys_enc->intf_idx - INTF_0,
@@ -1619,6 +1690,10 @@ static int sde_encoder_phys_cmd_wait_for_commit_done(
 
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
+	if (sde_encoder_check_ctl_done_support(phys_enc->parent)
+			&& !sde_encoder_phys_cmd_is_master(phys_enc))
+		return 0;
+
 	/* only required for master controller */
 	if (sde_encoder_phys_cmd_is_master(phys_enc)) {
 		rc = _sde_encoder_phys_cmd_wait_for_wr_ptr(phys_enc);
@@ -1837,24 +1912,26 @@ static void _sde_encoder_autorefresh_disable_seq2(
 			tear_status.write_count);
 	}
 }
-
-static void sde_encoder_phys_cmd_prepare_commit(
-		struct sde_encoder_phys *phys_enc)
+static void _sde_encoder_phys_disable_autorefresh(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-	struct sde_kms *sde_kms = phys_enc->sde_kms;
+	struct sde_kms *sde_kms;
 
 	if (!phys_enc || !sde_encoder_phys_cmd_is_master(phys_enc))
 		return;
 
+	if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
+		return;
+
 	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
 			cmd_enc->autorefresh.cfg.enable);
 
-	if (!sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc))
-		return;
+	sde_kms = phys_enc->sde_kms;
 
 	sde_encoder_phys_cmd_connect_te(phys_enc, false);
 	_sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0);
+	phys_enc->autorefresh_disable_trans = true;
+
 	if (sde_kms && sde_kms->catalog &&
 			(sde_kms->catalog->autorefresh_disable_seq == AUTOREFRESH_DISABLE_SEQ1)) {
 		_sde_encoder_autorefresh_disable_seq1(phys_enc);
@@ -1865,6 +1942,11 @@ static void sde_encoder_phys_cmd_prepare_commit(
 	SDE_DEBUG_CMDENC(cmd_enc, "autorefresh disabled successfully\n");
 }
 
+static void sde_encoder_phys_cmd_prepare_commit(struct sde_encoder_phys *phys_enc)
+{
+	return _sde_encoder_phys_disable_autorefresh(phys_enc);
+}
+
 static void sde_encoder_phys_cmd_trigger_start(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -1888,11 +1970,38 @@ static void sde_encoder_phys_cmd_trigger_start(
 	cmd_enc->wr_ptr_wait_success = false;
 }
 
+static void _sde_encoder_phys_cmd_calculate_wd_params(struct sde_encoder_phys *phys_enc,
+		struct intf_wd_jitter_params *wd_jitter)
+{
+	u32 nominal_te_value;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_mode_info *mode_info;
+	const u32 multiplier = 1 << 10;
+
+	sde_enc = to_sde_encoder_virt(phys_enc->parent);
+	mode_info = &sde_enc->mode_info;
+
+	if (mode_info->wd_jitter.jitter_type & MSM_DISPLAY_WD_INSTANTANEOUS_JITTER)
+		wd_jitter->jitter = mult_frac(multiplier, mode_info->wd_jitter.inst_jitter_numer,
+				(mode_info->wd_jitter.inst_jitter_denom * 100));
+
+	if (mode_info->wd_jitter.jitter_type & MSM_DISPLAY_WD_LTJ_JITTER) {
+		nominal_te_value = CALCULATE_WD_LOAD_VALUE(mode_info->frame_rate) * MDP_TICK_COUNT;
+		wd_jitter->ltj_max = mult_frac(nominal_te_value, mode_info->wd_jitter.ltj_max_numer,
+				(mode_info->wd_jitter.ltj_max_denom) * 100);
+		wd_jitter->ltj_slope = mult_frac((1 << 16), wd_jitter->ltj_max,
+				(mode_info->wd_jitter.ltj_time_sec * mode_info->frame_rate));
+	}
+
+	phys_enc->hw_intf->ops.configure_wd_jitter(phys_enc->hw_intf, wd_jitter);
+}
+
 static void sde_encoder_phys_cmd_setup_vsync_source(struct sde_encoder_phys *phys_enc,
 		u32 vsync_source, struct msm_display_info *disp_info)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_connector *sde_conn;
+	struct intf_wd_jitter_params wd_jitter = {0, 0};
 
 	if (!phys_enc || !phys_enc->hw_intf)
 		return;
@@ -1906,6 +2015,8 @@ static void sde_encoder_phys_cmd_setup_vsync_source(struct sde_encoder_phys *phy
 	if ((disp_info->is_te_using_watchdog_timer || sde_conn->panel_dead) &&
 			phys_enc->hw_intf->ops.setup_vsync_source) {
 		vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0;
+		if (phys_enc->hw_intf->ops.configure_wd_jitter)
+			_sde_encoder_phys_cmd_calculate_wd_params(phys_enc, &wd_jitter);
 		phys_enc->hw_intf->ops.setup_vsync_source(phys_enc->hw_intf,
 				sde_enc->mode_info.frame_rate);
 	} else {
@@ -1957,6 +2068,7 @@ static void sde_encoder_phys_cmd_init_ops(struct sde_encoder_phys_ops *ops)
 	ops->setup_misr = sde_encoder_helper_setup_misr;
 	ops->collect_misr = sde_encoder_helper_collect_misr;
 	ops->add_to_minidump = sde_encoder_phys_cmd_add_enc_to_minidump;
+	ops->disable_autorefresh = _sde_encoder_phys_disable_autorefresh;
 }
 
 static inline bool sde_encoder_phys_cmd_intf_te_supported(

+ 101 - 17
msm/sde/sde_encoder_phys_vid.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -490,6 +491,7 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	int new_cnt = -1, old_cnt = -1;
 	u32 event = 0;
 	int pend_ret_fence_cnt = 0;
+	u32 fence_ready = -1;
 
 	if (!phys_enc)
 		return;
@@ -509,7 +511,7 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 
 	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
 
-	if (hw_ctl && hw_ctl->ops.get_flush_register)
+	if (hw_ctl->ops.get_flush_register)
 		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
 	if (flush_register)
@@ -527,7 +529,7 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 	}
 
 not_flushed:
-	if (hw_ctl && hw_ctl->ops.get_reset)
+	if (hw_ctl->ops.get_reset)
 		reset_status = hw_ctl->ops.get_reset(hw_ctl);
 
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -544,12 +546,16 @@ not_flushed:
 		phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf,
 			&intf_status);
 
+	if (flush_register && hw_ctl->ops.get_hw_fence_status)
+		fence_ready = hw_ctl->ops.get_hw_fence_status(hw_ctl);
+
 	SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->hw_intf->idx - INTF_0,
 			old_cnt, atomic_read(&phys_enc->pending_kickoff_cnt),
 			reset_status ? SDE_EVTLOG_ERROR : 0,
 			flush_register, event,
 			atomic_read(&phys_enc->pending_retire_fence_cnt),
-			intf_status.frame_count, intf_status.line_count);
+			intf_status.frame_count, intf_status.line_count,
+			fence_ready);
 
 	/* Signal any waiting atomic commit thread */
 	wake_up_all(&phys_enc->pending_kickoff_wq);
@@ -606,7 +612,7 @@ static void sde_encoder_phys_vid_cont_splash_mode_set(
 static void sde_encoder_phys_vid_mode_set(
 		struct sde_encoder_phys *phys_enc,
 		struct drm_display_mode *mode,
-		struct drm_display_mode *adj_mode)
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_rm *rm;
 	struct sde_rm_hw_iter iter;
@@ -632,8 +638,14 @@ static void sde_encoder_phys_vid_mode_set(
 	/* Retrieve previously allocated HW Resources. Shouldn't fail */
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
-		if (sde_rm_get_hw(rm, &iter))
+		if (sde_rm_get_hw(rm, &iter)) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers = true;
+				SDE_EVT32(phys_enc->hw_ctl->idx,
+						to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
 		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
@@ -876,17 +888,21 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
 		struct sde_encoder_phys *phys_enc, bool notify)
 {
 	struct sde_encoder_wait_info wait_info = {0};
-	int ret = 0;
+	int ret = 0, new_cnt;
 	u32 event = SDE_ENCODER_FRAME_EVENT_ERROR |
 		SDE_ENCODER_FRAME_EVENT_SIGNAL_RELEASE_FENCE |
 		SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE;
 	struct drm_connector *conn;
+	struct sde_hw_ctl *hw_ctl;
+	u32 flush_register = 0xebad;
+	bool timeout = false;
 
-	if (!phys_enc) {
+	if (!phys_enc || !phys_enc->hw_ctl) {
 		pr_err("invalid encoder\n");
 		return -EINVAL;
 	}
 
+	hw_ctl = phys_enc->hw_ctl;
 	conn = phys_enc->connector;
 
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
@@ -897,20 +913,36 @@ static int _sde_encoder_phys_vid_wait_for_vblank(
 	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
 			&wait_info);
 
-	if (notify && (ret == -ETIMEDOUT) &&
-	    atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0) &&
-	    phys_enc->parent_ops.handle_frame_done) {
-		phys_enc->parent_ops.handle_frame_done(
-			phys_enc->parent, phys_enc, event);
+	if (ret == -ETIMEDOUT) {
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+		timeout = true;
 
-		if (sde_encoder_recovery_events_enabled(phys_enc->parent))
-			sde_connector_event_notify(conn,
-				DRM_EVENT_SDE_HW_RECOVERY,
+		/*
+		 * Reset ret when flush register is consumed. This handles a race condition between
+		 * irq wait timeout handler reading the register status and the actual IRQ handler
+		 */
+		if (hw_ctl->ops.get_flush_register)
+			flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+		if (!flush_register)
+			ret = 0;
+
+		SDE_EVT32(DRMID(phys_enc->parent), new_cnt, flush_register, ret,
+				SDE_EVTLOG_FUNC_CASE1);
+	}
+
+	if (notify && timeout && atomic_add_unless(&phys_enc->pending_retire_fence_cnt, -1, 0)
+			&& phys_enc->parent_ops.handle_frame_done) {
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent, phys_enc, event);
+
+		/* notify only on actual timeout cases */
+		if ((ret == -ETIMEDOUT) && sde_encoder_recovery_events_enabled(phys_enc->parent))
+			sde_connector_event_notify(conn, DRM_EVENT_SDE_HW_RECOVERY,
 				sizeof(uint8_t), SDE_RECOVERY_HARD_RESET);
 	}
 
-	SDE_EVT32(DRMID(phys_enc->parent), event, notify, ret,
-			ret ? SDE_EVTLOG_FATAL : 0);
+	SDE_EVT32(DRMID(phys_enc->parent), event, notify, timeout, ret,
+			ret ? SDE_EVTLOG_FATAL : 0, SDE_EVTLOG_FUNC_EXIT);
+
 	return ret;
 }
 
@@ -920,6 +952,21 @@ static int sde_encoder_phys_vid_wait_for_vblank(
 	return _sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
 }
 
+static void sde_encoder_phys_vid_update_txq(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+
+	if (!phys_enc)
+		return;
+
+	sde_enc = to_sde_encoder_virt(phys_enc->parent);
+	if (!sde_enc)
+		return;
+
+	SDE_EVT32(DRMID(phys_enc->parent));
+	sde_encoder_helper_update_out_fence_txq(sde_enc, true);
+}
+
 static int sde_encoder_phys_vid_wait_for_commit_done(
 		struct sde_encoder_phys *phys_enc)
 {
@@ -929,6 +976,9 @@ static int sde_encoder_phys_vid_wait_for_commit_done(
 	if (rc)
 		sde_encoder_helper_phys_reset(phys_enc);
 
+	/* Update TxQ for the incoming frame */
+	sde_encoder_phys_vid_update_txq(phys_enc);
+
 	return rc;
 }
 
@@ -1122,12 +1172,42 @@ exit:
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
+static int sde_encoder_phys_vid_poll_for_active_region(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct intf_timing_params *timing;
+	u32 line_cnt, v_inactive, poll_time_us, trial = 0;
+
+	if (!phys_enc || !phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	timing = &vid_enc->timing_params;
+
+	/* if programmable fetch is not enabled return early or if it is not a DSI interface*/
+	if (!programmable_fetch_get_num_lines(vid_enc, timing) ||
+			phys_enc->hw_intf->cap->type != INTF_DSI)
+		return 0;
+
+	poll_time_us = DIV_ROUND_UP(1000000, timing->vrefresh) / MAX_POLL_CNT;
+	v_inactive = timing->v_front_porch + timing->v_back_porch + timing->vsync_pulse_width;
+
+	do {
+		usleep_range(poll_time_us, poll_time_us + 5);
+		line_cnt = phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
+		trial++;
+	} while ((trial < MAX_POLL_CNT) || (line_cnt < v_inactive));
+
+	return (trial >= MAX_POLL_CNT) ? -ETIMEDOUT : 0;
+}
+
 static void sde_encoder_phys_vid_handle_post_kickoff(
 		struct sde_encoder_phys *phys_enc)
 {
 	unsigned long lock_flags;
 	struct sde_encoder_phys_vid *vid_enc;
 	u32 avr_mode;
+	u32 ret;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -1150,6 +1230,10 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
 				1);
 			spin_unlock_irqrestore(phys_enc->enc_spinlock,
 				lock_flags);
+
+			ret = sde_encoder_phys_vid_poll_for_active_region(phys_enc);
+			if (ret)
+				SDE_DEBUG_VIDENC(vid_enc, "poll for active failed ret:%d\n", ret);
 		}
 		phys_enc->enable_state = SDE_ENC_ENABLED;
 	}

+ 303 - 147
msm/sde/sde_encoder_phys_wb.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -77,19 +78,24 @@ static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
  * sde_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
  * @phys_enc:	Pointer to physical encoder
  */
-static void sde_encoder_phys_wb_set_ot_limit(
-		struct sde_encoder_phys *phys_enc)
+static void sde_encoder_phys_wb_set_ot_limit(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_connector_state *conn_state;
 	struct sde_vbif_set_ot_params ot_params;
+	enum sde_wb_usage_type usage_type;
+
+	conn_state = phys_enc->connector->state;
+	usage_type = sde_connector_get_property(conn_state, CONNECTOR_PROP_WB_USAGE_TYPE);
 
 	memset(&ot_params, 0, sizeof(ot_params));
 	ot_params.xin_id = hw_wb->caps->xin_id;
 	ot_params.num = hw_wb->idx - WB_0;
 	ot_params.width = wb_enc->wb_roi.w;
 	ot_params.height = wb_enc->wb_roi.h;
-	ot_params.is_wfd = true;
+	ot_params.is_wfd = ((phys_enc->in_clone_mode) || (usage_type == WB_USAGE_OFFLINE_WB)) ?
+					false : true;
 	ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
 	ot_params.vbif_idx = hw_wb->caps->vbif_idx;
 	ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
@@ -107,7 +113,9 @@ static void sde_encoder_phys_wb_set_qos_remap(struct sde_encoder_phys *phys_enc)
 	struct sde_encoder_phys_wb *wb_enc;
 	struct sde_hw_wb *hw_wb;
 	struct drm_crtc *crtc;
+	struct drm_connector_state *conn_state;
 	struct sde_vbif_set_qos_params qos_params;
+	enum sde_wb_usage_type usage_type;
 
 	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
 		SDE_ERROR("invalid arguments\n");
@@ -121,6 +129,8 @@ static void sde_encoder_phys_wb_set_qos_remap(struct sde_encoder_phys *phys_enc)
 	}
 
 	crtc = wb_enc->crtc;
+	conn_state = phys_enc->connector->state;
+	usage_type = sde_connector_get_property(conn_state, CONNECTOR_PROP_WB_USAGE_TYPE);
 
 	if (!wb_enc->hw_wb || !wb_enc->hw_wb->caps) {
 		SDE_ERROR("[enc:%d wb:%d] invalid WB HW\n", DRMID(phys_enc->parent), WBID(wb_enc));
@@ -134,8 +144,12 @@ static void sde_encoder_phys_wb_set_qos_remap(struct sde_encoder_phys *phys_enc)
 	qos_params.xin_id = hw_wb->caps->xin_id;
 	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
 	qos_params.num = hw_wb->idx - WB_0;
-	qos_params.client_type = phys_enc->in_clone_mode ?
-					VBIF_CWB_CLIENT : VBIF_NRT_CLIENT;
+	if (phys_enc->in_clone_mode)
+		qos_params.client_type = VBIF_CWB_CLIENT;
+	else if (usage_type == WB_USAGE_OFFLINE_WB)
+		qos_params.client_type = VBIF_OFFLINE_WB_CLIENT;
+	else
+		qos_params.client_type = VBIF_NRT_CLIENT;
 
 	SDE_DEBUG("[enc:%d wb:%d] qos_remap - wb:%d vbif:%d xin:%d clone:%d\n",
 		DRMID(phys_enc->parent), WBID(wb_enc), qos_params.num,
@@ -152,9 +166,11 @@ static void sde_encoder_phys_wb_set_qos(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_wb *wb_enc;
 	struct sde_hw_wb *hw_wb;
+	struct drm_connector_state *conn_state;
 	struct sde_hw_wb_qos_cfg qos_cfg = {0};
 	struct sde_perf_cfg *perf;
-	u32 fps_index = 0, lut_index, index, frame_rate, qos_count;
+	u32 fps_index = 0, lut_index, creq_index, ds_index, frame_rate, qos_count;
+	enum sde_wb_usage_type usage_type;
 
 	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
 		SDE_ERROR("invalid parameter(s)\n");
@@ -167,6 +183,9 @@ static void sde_encoder_phys_wb_set_qos(struct sde_encoder_phys *phys_enc)
 		return;
 	}
 
+	conn_state = phys_enc->connector->state;
+	usage_type = sde_connector_get_property(conn_state, CONNECTOR_PROP_WB_USAGE_TYPE);
+
 	perf = &phys_enc->sde_kms->catalog->perf;
 	frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
 
@@ -181,22 +200,26 @@ static void sde_encoder_phys_wb_set_qos(struct sde_encoder_phys *phys_enc)
 
 	qos_cfg.danger_safe_en = true;
 
-	if (phys_enc->in_clone_mode && (SDE_FORMAT_IS_TILE(wb_enc->wb_fmt) ||
-				SDE_FORMAT_IS_UBWC(wb_enc->wb_fmt)))
-		lut_index = SDE_QOS_LUT_USAGE_CWB_TILE;
-	else if (phys_enc->in_clone_mode)
-		lut_index = SDE_QOS_LUT_USAGE_CWB;
+	if (phys_enc->in_clone_mode)
+		lut_index = (SDE_FORMAT_IS_TILE(wb_enc->wb_fmt)
+				|| SDE_FORMAT_IS_UBWC(wb_enc->wb_fmt)) ?
+					SDE_QOS_LUT_USAGE_CWB_TILE : SDE_QOS_LUT_USAGE_CWB;
 	else
-		lut_index = SDE_QOS_LUT_USAGE_NRT;
+		lut_index = (usage_type == WB_USAGE_OFFLINE_WB) ?
+					SDE_QOS_LUT_USAGE_OFFLINE_WB : SDE_QOS_LUT_USAGE_NRT;
 
-	index = (fps_index * SDE_QOS_LUT_USAGE_MAX) + lut_index;
-	qos_cfg.danger_lut = perf->danger_lut[index];
-	qos_cfg.safe_lut = (u32) perf->safe_lut[index];
-	qos_cfg.creq_lut = perf->creq_lut[index * SDE_CREQ_LUT_TYPE_MAX];
+	creq_index = lut_index * SDE_CREQ_LUT_TYPE_MAX;
+	creq_index += (fps_index * SDE_QOS_LUT_USAGE_MAX * SDE_CREQ_LUT_TYPE_MAX);
+	qos_cfg.creq_lut = perf->creq_lut[creq_index];
 
-	SDE_DEBUG("[enc:%d wb:%d] fps:%d mode:%d luts[0x%x,0x%x 0x%llx]\n",
+	ds_index = lut_index * SDE_DANGER_SAFE_LUT_TYPE_MAX;
+	ds_index += (fps_index * SDE_QOS_LUT_USAGE_MAX * SDE_DANGER_SAFE_LUT_TYPE_MAX);
+	qos_cfg.danger_lut = perf->danger_lut[ds_index];
+	qos_cfg.safe_lut = (u32) perf->safe_lut[ds_index];
+
+	SDE_DEBUG("[enc:%d wb:%d] fps:%d mode:%d type:%d luts[0x%x,0x%x 0x%llx]\n",
 		DRMID(phys_enc->parent), WBID(wb_enc), frame_rate, phys_enc->in_clone_mode,
-		qos_cfg.danger_lut, qos_cfg.safe_lut, qos_cfg.creq_lut);
+		usage_type, qos_cfg.danger_lut, qos_cfg.safe_lut, qos_cfg.creq_lut);
 
 	if (hw_wb->ops.setup_qos_lut)
 		hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
@@ -317,18 +340,21 @@ static void _sde_enc_phys_wb_get_out_resolution(struct drm_crtc_state *crtc_stat
 	sde_crtc_get_ds_io_res(crtc_state, &ds_res);
 	sde_connector_get_dnsc_blur_io_res(conn_state, &dnsc_blur_res);
 
-	if (ds_res.enabled) {
+	if (dnsc_blur_res.enabled) {
+		*out_width = dnsc_blur_res.dst_w;
+		*out_height = dnsc_blur_res.dst_h;
+	} else if (ds_res.enabled) {
 		if (ds_tap_pt == CAPTURE_DSPP_OUT) {
 			*out_width = ds_res.dst_w;
 			*out_height = ds_res.dst_h;
 		} else if (ds_tap_pt == CAPTURE_MIXER_OUT) {
 			*out_width = ds_res.src_w;
 			*out_height = ds_res.src_h;
+		} else {
+			*out_width = mode->hdisplay;
+			*out_height = mode->vdisplay;
 		}
-	} else if (dnsc_blur_res.enabled) {
-		*out_width = dnsc_blur_res.dst_w;
-		*out_height = dnsc_blur_res.dst_h;
-	} else {
+	} else {
 		*out_width = mode->hdisplay;
 		*out_height = mode->vdisplay;
 	}
@@ -340,14 +366,15 @@ static void _sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc,
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
 	struct sde_hw_wb_cdp_cfg *cdp_cfg = &wb_enc->cdp_cfg;
+	u32 cdp_index;
 
 	if (!hw_wb->ops.setup_cdp)
 		return;
 
 	memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
 
-	cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
-		[SDE_PERF_CDP_USAGE_NRT].wr_enable;
+	cdp_index = phys_enc->in_clone_mode ? SDE_PERF_CDP_USAGE_RT : SDE_PERF_CDP_USAGE_NRT;
+	cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg[cdp_index].wr_enable;
 	cdp_cfg->ubwc_meta_enable = SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
 	cdp_cfg->tile_amortize_enable = SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
 						SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
@@ -365,7 +392,7 @@ static void _sde_encoder_phys_wb_setup_roi(struct sde_encoder_phys *phys_enc,
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
 	struct sde_rect pu_roi = {0,};
 
-	if (hw_wb->ops.setup_roi)
+	if (!hw_wb->ops.setup_roi)
 		return;
 
 	if (hw_wb->ops.setup_crop && phys_enc->in_clone_mode) {
@@ -423,7 +450,8 @@ static void _sde_encoder_phys_wb_setup_out_cfg(struct sde_encoder_phys *phys_enc
 			wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_size[0],
 			wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_size[1],
 			wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_size[2],
-			wb_cfg->dest.plane_addr[3], wb_cfg->dest.plane_size[3]);
+			wb_cfg->dest.plane_addr[3], wb_cfg->dest.plane_size[3],
+			wb_cfg->roi.x, wb_cfg->roi.y, wb_cfg->roi.w, wb_cfg->roi.h);
 		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
 	}
 }
@@ -521,12 +549,15 @@ static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc, bo
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_wb_cfg *wb_cfg = &wb_enc->wb_cfg;
 	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
 	struct sde_crtc *crtc = to_sde_crtc(wb_enc->crtc);
 	struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp;
 	struct sde_hw_dnsc_blur *hw_dnsc_blur = phys_enc->hw_dnsc_blur;
 	bool need_merge = (crtc->num_mixers > 1);
+	enum sde_dcwb;
 	int i = 0;
+	const int num_wb = 1;
 
 	if (!phys_enc->in_clone_mode) {
 		SDE_DEBUG("[enc:%d wb:%d] not in CWB mode. early return\n",
@@ -546,10 +577,16 @@ static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc, bo
 			test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features))) {
 		struct sde_hw_intf_cfg_v1 intf_cfg = { 0, };
 
-		for (i = 0; i < crtc->num_mixers; i++)
-			intf_cfg.cwb[intf_cfg.cwb_count++] = (enum sde_cwb)
-				(test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features) ?
-					((hw_pp->idx % 2) + i) : (hw_pp->idx + i));
+		intf_cfg.wb_count = num_wb;
+		intf_cfg.wb[0] = hw_wb->idx;
+
+		for (i = 0; i < crtc->num_mixers; i++) {
+			if (test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features))
+				intf_cfg.cwb[intf_cfg.cwb_count++] =
+						(enum sde_cwb)(hw_pp->dcwb_idx + i);
+			else
+				intf_cfg.cwb[intf_cfg.cwb_count++] = (enum sde_cwb)(hw_pp->idx + i);
+		}
 
 		if (hw_pp->merge_3d && (intf_cfg.merge_3d_count <
 				MAX_MERGE_3D_PER_CTL_V1) && need_merge)
@@ -570,6 +607,9 @@ static void _sde_encoder_phys_wb_setup_cwb(struct sde_encoder_phys *phys_enc, bo
 				test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features))
 			hw_wb->ops.bind_dcwb_pp_blk(hw_wb, enable, hw_pp->idx);
 
+		if (hw_wb->ops.setup_crop && !enable)
+			hw_wb->ops.setup_crop(hw_wb, wb_cfg, false);
+
 		if (hw_ctl->ops.update_intf_cfg) {
 			hw_ctl->ops.update_intf_cfg(hw_ctl, &intf_cfg, enable);
 			SDE_DEBUG("[enc:%d wb:%d] in CWB/DCWB mode on CTL_%d PP-%d merge3d:%d\n",
@@ -718,16 +758,13 @@ static int _sde_enc_phys_wb_validate_dnsc_blur_filter(
 	return 0;
 }
 
-static int _sde_enc_phys_wb_validate_dnsc_blur_ds(struct drm_crtc_state *crtc_state,
-			struct drm_connector_state *conn_state, const struct sde_format *fmt)
+static int _sde_enc_phys_wb_validate_dnsc_blur_filters(struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
 {
-	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
 	struct sde_connector_state *sde_conn_state = to_sde_connector_state(conn_state);
-	struct sde_kms *sde_kms;
-	struct sde_drm_dnsc_blur_cfg *cfg;
 	struct sde_dnsc_blur_filter_info *filter_info;
-	struct sde_io_res ds_res = {0, }, dnsc_blur_res = {0, };
-	u32 ds_tap_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT);
+	struct sde_drm_dnsc_blur_cfg *cfg;
+	struct sde_kms *sde_kms;
 	int ret = 0, i, j;
 
 	sde_kms = sde_connector_get_kms(conn_state->connector);
@@ -736,13 +773,48 @@ static int _sde_enc_phys_wb_validate_dnsc_blur_ds(struct drm_crtc_state *crtc_st
 		return -EINVAL;
 	}
 
+	for (i = 0; i < sde_conn_state->dnsc_blur_count; i++) {
+		cfg = &sde_conn_state->dnsc_blur_cfg[i];
+
+		for (j = 0; j < sde_kms->catalog->dnsc_blur_filter_count; j++) {
+			filter_info = &sde_kms->catalog->dnsc_blur_filters[i];
+			if (cfg->flags_h == filter_info->filter) {
+				ret = _sde_enc_phys_wb_validate_dnsc_blur_filter(filter_info,
+						cfg->src_width, cfg->dst_width);
+				if (ret)
+					break;
+			}
+			if (cfg->flags_v == filter_info->filter) {
+				ret = _sde_enc_phys_wb_validate_dnsc_blur_filter(filter_info,
+						cfg->src_height, cfg->dst_height);
+				if (ret)
+					break;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int _sde_enc_phys_wb_validate_dnsc_blur_ds(struct drm_crtc_state *crtc_state,
+			struct drm_connector_state *conn_state, const struct sde_format *fmt,
+			struct sde_rect *wb_roi)
+{
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state);
+	const struct drm_display_mode *mode = &crtc_state->mode;
+	struct sde_io_res ds_res = {0, }, dnsc_blur_res = {0, };
+	u32 ds_tap_pt = sde_crtc_get_property(cstate, CRTC_PROP_CAPTURE_OUTPUT);
+
 	sde_crtc_get_ds_io_res(crtc_state, &ds_res);
 	sde_connector_get_dnsc_blur_io_res(conn_state, &dnsc_blur_res);
 
-	if ((ds_res.enabled && (!ds_res.src_w || !ds_res.src_h
-					|| !ds_res.dst_w || !ds_res.dst_h))) {
-		SDE_ERROR("invalid ds cfg src:%ux%u dst:%ux%u\n",
-				ds_res.src_w, ds_res.src_h, ds_res.dst_w, ds_res.dst_h);
+	/* wb_roi should match with mode w/h if none of these features are enabled */
+	if ((!ds_res.enabled && !dnsc_blur_res.enabled && !cstate->cwb_enc_mask)
+			&& ((wb_roi->w && (wb_roi->w != mode->hdisplay))
+				|| (wb_roi->h && (wb_roi->h != mode->vdisplay)))) {
+		SDE_ERROR("invalid wb-roi {%u,%u,%u,%u} mode:%ux%u\n",
+				wb_roi->x, wb_roi->y, wb_roi->w, wb_roi->h,
+				mode->hdisplay, mode->vdisplay);
 		return -EINVAL;
 	}
 
@@ -750,7 +822,9 @@ static int _sde_enc_phys_wb_validate_dnsc_blur_ds(struct drm_crtc_state *crtc_st
 		return 0;
 
 	if (!dnsc_blur_res.src_w || !dnsc_blur_res.src_h
-			|| !dnsc_blur_res.dst_w || !dnsc_blur_res.dst_h) {
+			|| !dnsc_blur_res.dst_w || !dnsc_blur_res.dst_h
+			|| (dnsc_blur_res.src_w < dnsc_blur_res.dst_w)
+			|| (dnsc_blur_res.src_h < dnsc_blur_res.dst_h)) {
 		SDE_ERROR("invalid dnsc_blur cfg src:%ux%u dst:%ux%u\n",
 				dnsc_blur_res.src_w, dnsc_blur_res.src_h,
 				dnsc_blur_res.dst_w, dnsc_blur_res.dst_h);
@@ -775,29 +849,15 @@ static int _sde_enc_phys_wb_validate_dnsc_blur_ds(struct drm_crtc_state *crtc_st
 	} else if (SDE_FORMAT_IS_YUV(fmt)) {
 		SDE_ERROR("YUV output not supported with dnsc_blur\n");
 		return -EINVAL;
+	} else if ((wb_roi->w && (wb_roi->w != dnsc_blur_res.dst_w)) ||
+			(wb_roi->h && (wb_roi->h != dnsc_blur_res.dst_h))) {
+		SDE_ERROR("invalid WB ROI with dnsc_blur, roi:{%d,%d,%d,%d}, dnsc_blur dst:%ux%u\n",
+				wb_roi->x, wb_roi->y, wb_roi->w, wb_roi->h,
+				dnsc_blur_res.dst_w, dnsc_blur_res.dst_h);
+		return -EINVAL;
 	}
 
-	for (i = 0; i < sde_conn_state->dnsc_blur_count; i++) {
-		cfg = &sde_conn_state->dnsc_blur_cfg[i];
-
-		for (j = 0; j < sde_kms->catalog->dnsc_blur_filter_count; j++) {
-			filter_info = &sde_kms->catalog->dnsc_blur_filters[i];
-			if (cfg->flags_h == filter_info->filter) {
-				ret = _sde_enc_phys_wb_validate_dnsc_blur_filter(filter_info,
-						cfg->src_width, cfg->dst_width);
-				if (ret)
-					break;
-			}
-			if (cfg->flags_v == filter_info->filter) {
-				ret = _sde_enc_phys_wb_validate_dnsc_blur_filter(filter_info,
-						cfg->src_height, cfg->dst_height);
-				if (ret)
-					break;
-			}
-		}
-	}
-
-	return ret;
+	return _sde_enc_phys_wb_validate_dnsc_blur_filters(crtc_state, conn_state);
 }
 
 static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
@@ -873,13 +933,6 @@ static int _sde_enc_phys_wb_validate_cwb(struct sde_encoder_phys *phys_enc,
 		return -EINVAL;
 	}
 
-	if (((wb_roi.w < out_width) || (wb_roi.h < out_height)) &&
-			(wb_roi.w * wb_roi.h * fmt->bpp) % 256) {
-		SDE_ERROR("invalid stride w = %d h = %d bpp =%d out_width = %d, out_height = %d\n",
-				wb_roi.w, wb_roi.h, fmt->bpp, out_width, out_height);
-		return -EINVAL;
-	}
-
 	/*
 	 * If output size is equal to input size ensure wb_roi with x and y offset
 	 * will be within buffer. If output size is smaller, only width and height are taken
@@ -1002,7 +1055,7 @@ static int sde_encoder_phys_wb_atomic_check(struct sde_encoder_phys *phys_enc,
 	if (SDE_FORMAT_IS_YUV(fmt) != !!phys_enc->hw_cdm)
 		crtc_state->mode_changed = true;
 
-	rc = _sde_enc_phys_wb_validate_dnsc_blur_ds(crtc_state, conn_state, fmt);
+	rc = _sde_enc_phys_wb_validate_dnsc_blur_ds(crtc_state, conn_state, fmt, &wb_roi);
 	if (rc) {
 		SDE_ERROR("[enc:%d wb:%d] failed dnsc_blur/ds validation; ret:%d\n",
 				DRMID(phys_enc->parent), WBID(wb_enc), rc);
@@ -1052,19 +1105,21 @@ static int sde_encoder_phys_wb_atomic_check(struct sde_encoder_phys *phys_enc,
 	return rc;
 }
 
-static void _sde_encoder_phys_wb_setup_cache(struct sde_encoder_phys_wb *wb_enc,
+static void _sde_encoder_phys_wb_setup_sys_cache(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb)
 {
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_wb_device *wb_dev = wb_enc->wb_dev;
 	struct drm_connector_state *state = wb_dev->connector->state;
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
 	struct sde_crtc *sde_crtc = to_sde_crtc(wb_enc->crtc);
-	struct sde_sc_cfg *sc_cfg = &hw_wb->catalog->sc_cfg[SDE_SYS_CACHE_DISP_WB];
+	struct sde_sc_cfg *sc_cfg;
 	struct sde_hw_wb_sc_cfg *cfg  = &wb_enc->sc_cfg;
-	u32 cache_enable;
+	u32 cache_enable, cache_flag, cache_rd_type, cache_wr_type;
+	int i;
 
-	if (!sc_cfg->has_sys_cache) {
-		SDE_DEBUG("sys cache feature not enabled\n");
+	if (!fb) {
+		SDE_ERROR("invalid fb on wb %d\n", WBID(wb_enc));
 		return;
 	}
 
@@ -1073,6 +1128,31 @@ static void _sde_encoder_phys_wb_setup_cache(struct sde_encoder_phys_wb *wb_enc,
 		return;
 	}
 
+	/*
+	 * - use LLCC_DISP/LLCC_DISP_1 for cwb static display
+	 * - use LLCC_DISP_WB for 2-pass composition using offline-wb
+	 */
+	if (phys_enc->in_clone_mode) {
+		/* toggle system cache SCID between consecutive CWB writes */
+		if (test_bit(SDE_SYS_CACHE_DISP_1, hw_wb->catalog->sde_sys_cache_type_map)
+				&& cfg->type == SDE_SYS_CACHE_DISP) {
+			cache_wr_type = SDE_SYS_CACHE_DISP_1;
+			cache_rd_type = SDE_SYS_CACHE_DISP_1;
+		} else {
+			cache_wr_type = SDE_SYS_CACHE_DISP;
+			cache_rd_type = SDE_SYS_CACHE_DISP;
+		}
+	} else {
+		cache_rd_type = SDE_SYS_CACHE_DISP_WB;
+		cache_wr_type = SDE_SYS_CACHE_DISP_WB;
+	}
+
+	sc_cfg = &hw_wb->catalog->sc_cfg[cache_wr_type];
+	if (!test_bit(cache_wr_type, hw_wb->catalog->sde_sys_cache_type_map)) {
+		SDE_DEBUG("sys cache type %d not enabled\n", cache_wr_type);
+		return;
+	}
+
 	cache_enable = sde_connector_get_property(state, CONNECTOR_PROP_CACHE_STATE);
 
 	if (!cfg->wr_en && !cache_enable)
@@ -1083,19 +1163,121 @@ static void _sde_encoder_phys_wb_setup_cache(struct sde_encoder_phys_wb *wb_enc,
 
 	if (cache_enable) {
 		cfg->wr_scid = sc_cfg->llcc_scid;
-		cfg->type = SDE_SYS_CACHE_DISP_WB;
-		msm_framebuffer_set_cache_hint(fb, MSM_FB_CACHE_WRITE_EN, SDE_SYS_CACHE_DISP_WB);
+		cfg->type = cache_wr_type;
+		cache_flag = MSM_FB_CACHE_WRITE_EN;
 	} else {
 		cfg->wr_scid = 0x0;
 		cfg->type = SDE_SYS_CACHE_NONE;
-		msm_framebuffer_set_cache_hint(fb, MSM_FB_CACHE_NONE, SDE_SYS_CACHE_NONE);
+		cache_flag = MSM_FB_CACHE_NONE;
+		cache_rd_type = SDE_SYS_CACHE_NONE;
+		cache_wr_type = SDE_SYS_CACHE_NONE;
 	}
+	msm_framebuffer_set_cache_hint(fb, cache_flag, cache_rd_type, cache_wr_type);
 
-	sde_crtc->new_perf.llcc_active[SDE_SYS_CACHE_DISP_WB] = cache_enable;
-	sde_core_perf_crtc_update_llcc(wb_enc->crtc);
+	/*
+	 * avoid llcc_active reset for crtc while in clone mode as it will reset it for
+	 * primary display as well
+	 */
+	if (cache_enable) {
+		sde_crtc->new_perf.llcc_active[cache_wr_type] = true;
+		sde_crtc->new_perf.llcc_active[cache_rd_type] = true;
+		sde_core_perf_crtc_update_llcc(wb_enc->crtc);
+	} else if (!phys_enc->in_clone_mode) {
+		for (i = 0; i < SDE_SYS_CACHE_MAX; i++)
+			sde_crtc->new_perf.llcc_active[i] = false;
+		sde_core_perf_crtc_update_llcc(wb_enc->crtc);
+	}
 
 	hw_wb->ops.setup_sys_cache(hw_wb, cfg);
-	SDE_EVT32(WBID(wb_enc), cfg->wr_scid, cfg->flags, cfg->type, cache_enable);
+	SDE_EVT32(WBID(wb_enc), cfg->wr_scid, cfg->flags, cfg->type, cache_enable,
+			phys_enc->in_clone_mode, cache_flag, cache_rd_type,
+			cache_wr_type, fb->base.id);
+}
+
+static void _sde_encoder_phys_wb_update_cwb_flush_helper(
+		struct sde_encoder_phys *phys_enc, bool enable)
+{
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *c_state = NULL;
+	struct sde_hw_wb *hw_wb;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_pingpong *hw_pp;
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_crtc_state *crtc_state;
+	struct sde_crtc *crtc;
+	int i = 0;
+	int cwb_capture_mode = 0;
+	bool need_merge = false;
+	bool dspp_out = false;
+	enum sde_cwb cwb_idx = 0;
+	enum sde_cwb src_pp_idx = 0;
+	enum sde_dcwb dcwb_idx = 0;
+	size_t dither_sz = 0;
+	void *dither_cfg = NULL;
+
+	/* In CWB mode, program actual source master sde_hw_ctl from crtc */
+	crtc = to_sde_crtc(wb_enc->crtc);
+	hw_ctl = crtc->mixers[0].hw_ctl;
+	hw_pp = phys_enc->hw_pp;
+	hw_wb = wb_enc->hw_wb;
+	if (!hw_ctl || !hw_wb || !hw_pp) {
+		SDE_ERROR("[enc:%d wb:%d] HW resource not available for CWB\n",
+				DRMID(phys_enc->parent), WBID(wb_enc));
+		return;
+	}
+
+	crtc_state = to_sde_crtc_state(wb_enc->crtc->state);
+	cwb_capture_mode = sde_crtc_get_property(crtc_state, CRTC_PROP_CAPTURE_OUTPUT);
+	need_merge = (crtc->num_mixers > 1) ? true : false;
+	dspp_out = (cwb_capture_mode == CAPTURE_DSPP_OUT);
+	cwb_idx = (enum sde_cwb)hw_pp->idx;
+	src_pp_idx = (enum sde_cwb)crtc->mixers[0].hw_lm->idx;
+
+	if (test_bit(SDE_WB_CWB_DITHER_CTRL, &hw_wb->caps->features)) {
+		if (cwb_capture_mode) {
+			c_conn = to_sde_connector(phys_enc->connector);
+			c_state = to_sde_connector_state(phys_enc->connector->state);
+			dither_cfg = msm_property_get_blob(&c_conn->property_info,
+					&c_state->property_state, &dither_sz,
+					CONNECTOR_PROP_PP_CWB_DITHER);
+			SDE_DEBUG("Read cwb dither setting from blob %pK\n", dither_cfg);
+		} else {
+			/* disable case: tap is lm */
+			dither_cfg = NULL;
+		}
+	}
+
+	for (i = 0; i < crtc->num_mixers; i++) {
+		src_pp_idx = (enum sde_cwb) (src_pp_idx + i);
+
+		if (test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features)) {
+			dcwb_idx = (enum sde_dcwb) ((hw_pp->idx % 2) + i);
+			if ((test_bit(SDE_WB_CWB_DITHER_CTRL, &hw_wb->caps->features)) &&
+				hw_wb->ops.program_cwb_dither_ctrl){
+				hw_wb->ops.program_cwb_dither_ctrl(hw_wb,
+					dcwb_idx, dither_cfg, dither_sz, enable);
+			}
+			if (hw_wb->ops.program_dcwb_ctrl)
+				hw_wb->ops.program_dcwb_ctrl(hw_wb, dcwb_idx,
+					src_pp_idx, cwb_capture_mode, enable);
+			if (hw_ctl->ops.update_bitmask)
+				hw_ctl->ops.update_bitmask(hw_ctl,
+					SDE_HW_FLUSH_CWB, dcwb_idx, 1);
+
+		} else if (test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) {
+			cwb_idx = (enum sde_cwb) (hw_pp->idx + i);
+			if (hw_wb->ops.program_cwb_ctrl)
+				hw_wb->ops.program_cwb_ctrl(hw_wb, cwb_idx,
+					src_pp_idx, dspp_out, enable);
+			if (hw_ctl->ops.update_bitmask)
+				hw_ctl->ops.update_bitmask(hw_ctl,
+					SDE_HW_FLUSH_CWB, cwb_idx, 1);
+		}
+	}
+
+	if (need_merge && hw_ctl->ops.update_bitmask && hw_pp && hw_pp->merge_3d)
+		hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_MERGE_3D,
+				hw_pp->merge_3d->idx, 1);
 }
 
 static void _sde_encoder_phys_wb_update_cwb_flush(struct sde_encoder_phys *phys_enc, bool enable)
@@ -1108,15 +1290,11 @@ static void _sde_encoder_phys_wb_update_cwb_flush(struct sde_encoder_phys *phys_
 	struct sde_hw_dnsc_blur *hw_dnsc_blur;
 	struct sde_crtc *crtc;
 	struct sde_crtc_state *crtc_state;
-	int i = 0, cwb_capture_mode = 0;
+	int cwb_capture_mode = 0;
 	enum sde_cwb cwb_idx = 0;
 	enum sde_dcwb dcwb_idx = 0;
 	enum sde_cwb src_pp_idx = 0;
 	bool dspp_out = false, need_merge = false;
-	struct sde_connector *c_conn = NULL;
-	struct sde_connector_state *c_state = NULL;
-	void *dither_cfg = NULL;
-	size_t dither_sz = 0;
 
 	if (!phys_enc->in_clone_mode) {
 		SDE_DEBUG("enc:%d, wb:%d - not in CWB mode. early return\n",
@@ -1149,7 +1327,7 @@ static void _sde_encoder_phys_wb_update_cwb_flush(struct sde_encoder_phys *phys_
 	need_merge = (crtc->num_mixers > 1) ? true : false;
 
 	if (test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features)) {
-		dcwb_idx = (enum sde_dcwb) ((hw_pp->idx % 2) + i);
+		dcwb_idx = hw_pp->dcwb_idx;
 		if ((dcwb_idx + crtc->num_mixers) > DCWB_MAX) {
 			SDE_ERROR("[enc:%d, wb:%d] invalid DCWB config; dcwb=%d, num_lm=%d\n",
 				DRMID(phys_enc->parent), WBID(wb_enc), dcwb_idx, crtc->num_mixers);
@@ -1175,51 +1353,7 @@ static void _sde_encoder_phys_wb_update_cwb_flush(struct sde_encoder_phys *phys_
 
 	if (test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features) ||
 			test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features)) {
-		if (test_bit(SDE_WB_CWB_DITHER_CTRL, &hw_wb->caps->features)) {
-			if (cwb_capture_mode) {
-				c_conn = to_sde_connector(phys_enc->connector);
-				c_state = to_sde_connector_state(phys_enc->connector->state);
-				dither_cfg = msm_property_get_blob(&c_conn->property_info,
-						&c_state->property_state, &dither_sz,
-						CONNECTOR_PROP_PP_CWB_DITHER);
-				SDE_DEBUG("Read cwb dither setting from blob %pK\n", dither_cfg);
-			} else {
-				/* disable case: tap is lm */
-				dither_cfg = NULL;
-			}
-		}
-
-		for (i = 0; i < crtc->num_mixers; i++) {
-			src_pp_idx = (enum sde_cwb) (src_pp_idx + i);
-
-			if (test_bit(SDE_WB_DCWB_CTRL, &hw_wb->caps->features)) {
-				dcwb_idx = (enum sde_dcwb) ((hw_pp->idx % 2) + i);
-				if (test_bit(SDE_WB_CWB_DITHER_CTRL, &hw_wb->caps->features)) {
-					if (hw_wb->ops.program_cwb_dither_ctrl)
-						hw_wb->ops.program_cwb_dither_ctrl(hw_wb,
-							dcwb_idx, dither_cfg, dither_sz, enable);
-				}
-				if (hw_wb->ops.program_dcwb_ctrl)
-					hw_wb->ops.program_dcwb_ctrl(hw_wb, dcwb_idx,
-						src_pp_idx, cwb_capture_mode, enable);
-				if (hw_ctl->ops.update_bitmask)
-					hw_ctl->ops.update_bitmask(hw_ctl,
-						SDE_HW_FLUSH_CWB, dcwb_idx, 1);
-
-			} else if (test_bit(SDE_WB_CWB_CTRL, &hw_wb->caps->features)) {
-				cwb_idx = (enum sde_cwb) (hw_pp->idx + i);
-				if (hw_wb->ops.program_cwb_ctrl)
-					hw_wb->ops.program_cwb_ctrl(hw_wb, cwb_idx,
-						src_pp_idx, dspp_out, enable);
-				if (hw_ctl->ops.update_bitmask)
-					hw_ctl->ops.update_bitmask(hw_ctl,
-						SDE_HW_FLUSH_CWB, cwb_idx, 1);
-			}
-		}
-
-		if (need_merge && hw_ctl->ops.update_bitmask && hw_pp && hw_pp->merge_3d)
-			hw_ctl->ops.update_bitmask(hw_ctl, SDE_HW_FLUSH_MERGE_3D,
-					hw_pp->merge_3d->idx, 1);
+		_sde_encoder_phys_wb_update_cwb_flush_helper(phys_enc, enable);
 	} else {
 		phys_enc->hw_mdptop->ops.set_cwb_ppb_cntl(phys_enc->hw_mdptop,
 				need_merge, dspp_out);
@@ -1303,6 +1437,13 @@ static void _sde_encoder_phys_wb_setup_dnsc_blur(struct sde_encoder_phys *phys_e
 	sde_conn = to_sde_connector(wb_dev->connector);
 	sde_conn_state = to_sde_connector_state(wb_dev->connector->state);
 
+	if (sde_conn_state->dnsc_blur_count && !hw_dnsc_blur) {
+		SDE_ERROR("[enc:%d wb:%d] invalid config - dnsc_blur block not reserved\n",
+			DRMID(phys_enc->parent), WBID(wb_enc));
+		sde_kms->catalog->dnsc_blur_count = 0;
+		return;
+	}
+
 	/* swap between 0 & 1 lut idx on each config change for gaussian lut */
 	sde_conn_state->dnsc_blur_lut = 1 - sde_conn_state->dnsc_blur_lut;
 
@@ -1316,7 +1457,8 @@ static void _sde_encoder_phys_wb_setup_dnsc_blur(struct sde_encoder_phys *phys_e
 			hw_dnsc_blur->ops.setup_dither(hw_dnsc_blur, cfg);
 
 		if (hw_dnsc_blur->ops.bind_pingpong_blk)
-			hw_dnsc_blur->ops.bind_pingpong_blk(hw_dnsc_blur, enable, hw_pp->idx);
+			hw_dnsc_blur->ops.bind_pingpong_blk(hw_dnsc_blur, enable, hw_pp->idx,
+					phys_enc->in_clone_mode);
 
 		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), sde_conn_state->dnsc_blur_count,
 				cfg->flags, cfg->flags_h, cfg->flags_v, cfg->src_width,
@@ -1418,7 +1560,7 @@ static void sde_encoder_phys_wb_setup(struct sde_encoder_phys *phys_enc)
 
 	_sde_encoder_phys_wb_setup_ctl(phys_enc, wb_enc->wb_fmt);
 
-	_sde_encoder_phys_wb_setup_cache(wb_enc, fb);
+	_sde_encoder_phys_wb_setup_sys_cache(phys_enc, fb);
 
 	_sde_encoder_phys_wb_setup_cwb(phys_enc, true);
 
@@ -1617,8 +1759,10 @@ static void sde_encoder_phys_wb_irq_ctrl(struct sde_encoder_phys *phys, bool ena
  * @mode:	Pointer to requested display mode
  * @adj_mode:	Pointer to adjusted display mode
  */
-static void sde_encoder_phys_wb_mode_set(struct sde_encoder_phys *phys_enc,
-		struct drm_display_mode *mode, struct drm_display_mode *adj_mode)
+static void sde_encoder_phys_wb_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode, bool *reinit_mixers)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_rm *rm = &phys_enc->sde_kms->rm;
@@ -1640,8 +1784,13 @@ static void sde_encoder_phys_wb_mode_set(struct sde_encoder_phys *phys_enc,
 	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
 	for (i = 0; i <= instance; i++) {
 		sde_rm_get_hw(rm, &iter);
-		if (i == instance)
+		if (i == instance) {
+			if (phys_enc->hw_ctl && phys_enc->hw_ctl != to_sde_hw_ctl(iter.hw)) {
+				*reinit_mixers =  true;
+				SDE_EVT32(phys_enc->hw_ctl->idx, to_sde_hw_ctl(iter.hw)->idx);
+			}
 			phys_enc->hw_ctl = to_sde_hw_ctl(iter.hw);
+		}
 	}
 
 	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
@@ -1889,7 +2038,8 @@ static int sde_encoder_phys_wb_prepare_for_kickoff(struct sde_encoder_phys *phys
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	int ret = 0;
 
-	phys_enc->frame_trigger_mode = params->frame_trigger_mode;
+	phys_enc->frame_trigger_mode = params ?
+		params->frame_trigger_mode : FRAME_DONE_WAIT_DEFAULT;
 	if (!phys_enc->in_clone_mode && (phys_enc->frame_trigger_mode == FRAME_DONE_WAIT_DEFAULT)
 			&& (atomic_read(&phys_enc->pending_kickoff_cnt))) {
 		ret = _sde_encoder_phys_wb_wait_for_idle(phys_enc, true);
@@ -2108,6 +2258,7 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
 	struct sde_crtc *sde_crtc = to_sde_crtc(wb_enc->crtc);
+	struct sde_hw_wb_sc_cfg cfg = { 0 };
 	int i;
 
 	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
@@ -2129,13 +2280,18 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
 
 	/* reset system cache properties */
 	if (wb_enc->sc_cfg.wr_en) {
-		memset(&wb_enc->sc_cfg, 0, sizeof(struct sde_hw_wb_sc_cfg));
 		if (hw_wb->ops.setup_sys_cache)
-			hw_wb->ops.setup_sys_cache(hw_wb, &wb_enc->sc_cfg);
+			hw_wb->ops.setup_sys_cache(hw_wb, &cfg);
 
-		for (i = 0; i < SDE_SYS_CACHE_MAX; i++)
-			sde_crtc->new_perf.llcc_active[i] = 0;
-		sde_core_perf_crtc_update_llcc(wb_enc->crtc);
+		/*
+		 * avoid llcc_active reset for crtc while in clone mode as it will reset it for
+		 * primary display as well
+		 */
+		if (!phys_enc->in_clone_mode) {
+			for (i = 0; i < SDE_SYS_CACHE_MAX; i++)
+				sde_crtc->new_perf.llcc_active[i] = 0;
+			sde_core_perf_crtc_update_llcc(wb_enc->crtc);
+		}
 	}
 
 	if (phys_enc->in_clone_mode) {
@@ -2222,7 +2378,7 @@ static void sde_encoder_phys_wb_get_hw_resources(struct sde_encoder_phys *phys_e
 		WBID(wb_enc), hw_res->wbs[hw_wb->idx - WB_0], hw_res->needs_cdm);
 }
 
-#ifdef CONFIG_DEBUG_FS
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 /**
  * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
  * @phys_enc:		Pointer to physical encoder
@@ -2246,7 +2402,7 @@ static int sde_encoder_phys_wb_init_debugfs(
 {
 	return 0;
 }
-#endif
+#endif /* CONFIG_DEBUG_FS */
 
 static int sde_encoder_phys_wb_late_register(struct sde_encoder_phys *phys_enc,
 		struct dentry *debugfs_root)

+ 559 - 42
msm/sde/sde_fence.c

@@ -1,17 +1,515 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/sync_file.h>
 #include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include "sde_fence.h"
 
 #define TIMELINE_VAL_LENGTH		128
 #define SPEC_FENCE_FLAG_FENCE_ARRAY	0x10
+#define SPEC_FENCE_FLAG_ARRAY_BIND	0x11
+
+/**
+ * struct sde_fence - release/retire fence structure
+ * @base: base fence structure
+ * @ctx: fence context
+ * @name: name of each fence- it is fence timeline + commit_count
+ * @fence_list: list to associated this fence on timeline/context
+ * @fd: fd attached to this fence - debugging purpose.
+ * @hwfence_out_ctl: hw ctl for the output fence
+ * @hwfence_index: hw fence index for this fence
+ * @txq_updated_fence: flag to indicate that a fence has been updated in txq
+ */
+struct sde_fence {
+	struct dma_fence base;
+	struct sde_fence_context *ctx;
+	char name[SDE_FENCE_NAME_SIZE];
+	struct list_head fence_list;
+	int fd;
+	struct sde_hw_ctl *hwfence_out_ctl;
+	u64 hwfence_index;
+	bool txq_updated_fence;
+};
+
+/**
+ * enum sde_hw_fence_clients - sde clients for the hw-fence feature
+ *
+ * Do not modify the order of this struct and/or add more elements
+ * without modify/add fields in the 'hw_fence_data' structs.
+ */
+enum sde_hw_fence_clients {
+	SDE_HW_FENCE_CLIENT_CTL_0,
+	SDE_HW_FENCE_CLIENT_CTL_1,
+	SDE_HW_FENCE_CLIENT_CTL_2,
+	SDE_HW_FENCE_CLIENT_CTL_3,
+	SDE_HW_FENCE_CLIENT_CTL_4,
+	SDE_HW_FENCE_CLIENT_CTL_5,
+	SDE_HW_FENCE_CLIENT_MAX,
+};
+
+/**
+ * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals for each display
+ *              clients to communicate with the fence controller.
+ * This struct must match the order of the 'sde_hw_fence_clients' enum,
+ * the output signal must match with the signals that FenceCTL expects for each display client.
+ * This 'hw_fence_data_dpu_client' must be used for HW that does not support dpu-signal.
+ */
+struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
+	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 14, {2, 3},   0, 8, 8},
+	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 15, {4, 5},   0, 8, 8},
+	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 16, {6, 7},   0, 8, 8},
+	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 17, {8, 9},   0, 8, 8},
+	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 18, {10, 11}, 0, 8, 8},
+	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 19, {12, 13}, 0, 8, 8}
+};
+
+/**
+ * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output signals for each display
+ *              clients to communicate with the fence controller.
+ * This struct must match the order of the 'sde_hw_fence_clients' enum,
+ * the output signal must match with the signals that FenceCTL expects for each display client.
+ * This 'hw_fence_data_dpu_client' must be used for HW that supports dpu-signal
+ */
+struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
+	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 0, {0, 6},  0, 8, 25},
+	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 1, {1, 7},  0, 8, 25},
+	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 2, {2, 8},  0, 8, 25},
+	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 3, {3, 9},  0, 8, 25},
+	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 4, {4, 10}, 0, 8, 25},
+	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 5, {5, 11}, 0, 8, 25}
+};
+
+int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc)
+{
+	struct sde_hw_fence_data *sde_hw_fence_data;
+	struct sde_hw_fence_data *hwfence_data;
+	int ctl_id;
+
+	if (!hw_ctl)
+		return -EINVAL;
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
+		SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
+		return -EINVAL;
+	}
+
+	hwfence_data = &hw_ctl->hwfence_data;
+	sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;
+
+	if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
+		SDE_ERROR("Unexpected client_id:%d for ctl_id:%d\n",
+			sde_hw_fence_data[ctl_id].client_id, ctl_id);
+		return -EINVAL;
+	}
+
+	/* init the default fence-data for this client */
+	memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));
+
+	SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
+	hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
+		&hwfence_data->mem_descriptor);
+	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
+
+		hwfence_data->hw_fence_handle = NULL;
+
+		SDE_ERROR("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
+			hwfence_data->hw_fence_client_id);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("hwfence registered ctl_id:%d hw_fence_client_id:%d handle:0x%p\n",
+		ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle);
+
+	return 0;
+}
+
+void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
+{
+	struct sde_hw_fence_data *hwfence_data;
+
+	if (!hw_ctl)
+		return;
+
+	hwfence_data = &hw_ctl->hwfence_data;
+
+	/* client was not registered */
+	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
+		return;
+
+	SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
+		hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);
+
+	msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
+
+	hwfence_data->hw_fence_handle = NULL;
+}
+
+static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
+{
+	struct sde_hw_fence_data *data;
+	struct msm_hw_fence_create_params params;
+	int ctl_id;
+	u64 hwfence_index;
+	int ret;
+
+	if (!hw_ctl)
+		return -EINVAL;
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	data = &hw_ctl->hwfence_data;
+
+	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
+		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
+		return -EINVAL;
+	}
+	params.fence = &sde_fence->base;
+	params.handle = &hwfence_index;
+
+	/* Create the HW fence */
+	ret = msm_hw_fence_create(data->hw_fence_handle, &params);
+	if (ret) {
+		SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
+			sde_fence->base.context, sde_fence->base.seqno);
+	} else {
+		/* store ctl and index for this fence */
+		sde_fence->hwfence_out_ctl = hw_ctl;
+		sde_fence->hwfence_index = hwfence_index;
+
+		SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
+			sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
+			sde_fence->base.seqno, sde_fence->name);
+	}
+
+	return ret;
+}
+
+static inline char *_get_client_id_name(int hw_fence_client_id)
+{
+	switch (hw_fence_client_id) {
+	case HW_FENCE_CLIENT_ID_CTX0:
+		return "HW_FENCE_CLIENT_ID_CTX0";
+	case HW_FENCE_CLIENT_ID_CTL0:
+		return "HW_FENCE_CLIENT_ID_CTL0";
+	case HW_FENCE_CLIENT_ID_CTL1:
+		return "HW_FENCE_CLIENT_ID_CTL1";
+	case HW_FENCE_CLIENT_ID_CTL2:
+		return "HW_FENCE_CLIENT_ID_CTL2";
+	case HW_FENCE_CLIENT_ID_CTL3:
+		return "HW_FENCE_CLIENT_ID_CTL3";
+	case HW_FENCE_CLIENT_ID_CTL4:
+		return "HW_FENCE_CLIENT_ID_CTL4";
+	case HW_FENCE_CLIENT_ID_CTL5:
+		return "HW_FENCE_CLIENT_ID_CTL15";
+	default:
+		return "Unknown";
+	}
+
+	return "unknown";
+}
+
+int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
+	u32 num_fences)
+{
+	struct sde_hw_fence_data *data;
+	int i, ret;
+	int ctl_id;
+
+	if (!hw_ctl) {
+		SDE_ERROR("wrong ctl\n");
+		return -EINVAL;
+	}
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	data = &hw_ctl->hwfence_data;
+	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
+		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
+		num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));
+
+	for (i = 0; i < num_fences; i++) {
+		SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
+			(fences[i])->context, (fences[i])->seqno);
+	}
+
+	/* register for wait */
+	ret = msm_hw_fence_wait_update(data->hw_fence_handle, fences, num_fences, true);
+	if (ret)
+		SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);
+	SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);
+
+	return ret;
+}
+
+static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, u32 line_count, u32 debugfs_hw_fence)
+{
+	struct sde_hw_fence_data *data;
+	u32 ipcc_out_signal;
+	int ctl_id;
+
+	if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
+			!hw_ctl->ops.hw_fence_update_output_fence) {
+		SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
+		return -EINVAL;
+	}
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	data = &hw_ctl->hwfence_data;
+	if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
+		/* This should not have happened!, review the ping pong calculation */
+		SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
+			MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
+		return -EINVAL;
+	}
+
+	ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
+	data->ipcc_out_signal_pp_idx = (++data->ipcc_out_signal_pp_idx %
+		MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
+
+	SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
+		ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));
+
+	if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
+			hw_ctl->ops.hw_fence_output_timestamp_ctrl)
+		hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);
+
+	/* update client/signal output fence */
+	hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
+	SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);
+
+	/* arm dpu to trigger output fence signal once ready */
+	if (line_count)
+		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
+			HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
+	else
+		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CTRL_DONE);
+
+	return 0;
+}
+
+static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, u32 line_count,
+		u32 debugfs_hw_fence)
+{
+	struct sde_hw_ctl *hw_ctl = NULL;
+	struct sde_fence *fc, *next;
+
+	spin_lock(&ctx->list_lock);
+	if (list_empty(&ctx->fence_list_head)) {
+		spin_unlock(&ctx->list_lock);
+		return 0;
+	}
+
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
+		struct dma_fence *fence = &fc->base;
+
+		/* this is not hw-fence, or already processed */
+		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
+			continue;
+
+		hw_ctl = fc->hwfence_out_ctl;
+		if (!hw_ctl) {
+			/*
+			 * We flagged an output dma-fence as hw-fence but the hw ctl to handle
+			 * it is not available, this should not have happened, but if it does,
+			 * this can translate to a fence-timeout!
+			 */
+			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
+			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
+				fence->context, fence->seqno);
+
+			spin_unlock(&ctx->list_lock);
+			return -EINVAL;
+		}
+	}
+	spin_unlock(&ctx->list_lock);
+
+	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
+	if (hw_ctl)
+		_arm_output_hw_fence(hw_ctl, line_count, debugfs_hw_fence);
+
+	return 0;
+}
+
+/* update output hw_fences txq */
+int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
+		u32 debugfs_hw_fence)
+{
+	int ret = 0;
+	struct sde_hw_fence_data *data;
+	struct sde_fence *fc, *next;
+	struct sde_hw_ctl *hw_ctl = NULL;
+	int ctl_id;
+	bool txq_updated = false;
+
+	spin_lock(&ctx->list_lock);
+	if (list_empty(&ctx->fence_list_head)) {
+		spin_unlock(&ctx->list_lock);
+		return 0;
+	}
+
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
+		struct dma_fence *fence = &fc->base;
+
+		/* this is not hw-fence, or already processed */
+		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
+				fc->txq_updated_fence)
+			continue;
+
+		hw_ctl = fc->hwfence_out_ctl;
+		if (!hw_ctl) {
+			/* We flagged an output dma-fence as hw-fence but the hw ctl to handle
+			 * it is not available, this should not have happened, but if it does,
+			 * this can translate to a fence-timeout!
+			 */
+			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
+			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
+				fence->seqno, SDE_EVTLOG_ERROR);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		ctl_id = hw_ctl->idx - CTL_0;
+		data = &hw_ctl->hwfence_data;
+		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
+			SDE_ERROR("unexpected handle for ctl_id:%d, this can fence-timeout\n",
+				ctl_id);
+			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
+				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		/* update hw-fence tx queue */
+		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
+		if (ret) {
+			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
+				fc->hwfence_index, fence->context, fence->seqno,
+				data->hw_fence_client_id);
+			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
+				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
+			goto exit;
+		}
+		/* avoid updating txq more than once and avoid repeating the same fence twice */
+		txq_updated = fc->txq_updated_fence = true;
+
+		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%llx ctl:%d vid:%d\n",
+			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);
+
+		/* We will update TxQ one time per frame */
+		if (txq_updated)
+			break;
+	}
+
+exit:
+	spin_unlock(&ctx->list_lock);
+
+	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
+	if ((txq_updated && hw_ctl) || line_count)
+		_sde_fence_arm_output_hw_fence(ctx, line_count, debugfs_hw_fence);
+
+	return ret;
+}
+
+static void _sde_hw_fence_release(struct sde_fence *f)
+{
+	struct sde_hw_fence_data *data;
+	struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
+	int ctl_id;
+	int ret;
+
+	if (!hw_ctl) {
+		SDE_ERROR("invalid hw_ctl\n");
+		return;
+	}
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	data = &hw_ctl->hwfence_data;
+	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
+		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
+		return;
+	}
+
+	SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
+		ctl_id, f->base.context, f->base.seqno, f->name);
+
+	/* Delete the HW fence */
+	ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
+	if (ret)
+		SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
+			f->base.context, f->base.seqno);
+}
+
+static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
+{
+	struct sde_hw_fence_data *data;
+	int ret = 0;
+
+	data = &hw_ctl->hwfence_data;
+
+	if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
+		SDE_EVT32(data->hw_fence_client_id);
+		ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
+		if (ret) {
+			pr_err("failed to reset client %d\n", data->hw_fence_client_id);
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
+		struct sde_hw_mdp *hw_mdp, bool disable)
+{
+	struct sde_hw_fence_data *data;
+	u32 ipcc_signal_id;
+	u32 ipcc_client_id;
+	int ctl_id;
+
+	/* we must support sw_override as well, so check both functions */
+	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
+			!hw_ctl->ops.hw_fence_trigger_sw_override) {
+		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
+		return -EINVAL;
+	}
+
+	ctl_id = hw_ctl->idx - CTL_0;
+	data = &hw_ctl->hwfence_data;
+
+	if (disable) {
+		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
+		return -EPERM;
+	}
+
+	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
+			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
+		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);
+
+	ipcc_signal_id = data->ipcc_in_signal;
+	ipcc_client_id = data->ipcc_in_client;
+
+	SDE_DEBUG("configure input signal:%d out client:%d ctl_id:%d\n", ipcc_signal_id,
+		ipcc_client_id, ctl_id);
+	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id);
+
+	/* configure dpu hw for the client/signal pair signaling input-fence */
+	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);
+
+	/* Enable hw-fence for this ctrl-path */
+	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);
+
+	return 0;
+}
 
 void *sde_sync_get(uint64_t fd)
 {
@@ -25,11 +523,47 @@ void sde_sync_put(void *fence)
 		dma_fence_put(fence);
 }
 
+void sde_fence_dump(struct dma_fence *fence)
+{
+	char timeline_str[TIMELINE_VAL_LENGTH];
+
+	if (fence->ops->timeline_value_str)
+		fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);
+
+	SDE_ERROR(
+		"fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
+		fence->ops->get_driver_name(fence),
+		fence->ops->get_timeline_name(fence),
+		fence->seqno, timeline_str,
+		fence->ops->signaled ?
+		fence->ops->signaled(fence) : 0xffffffff,
+		dma_fence_get_status(fence), fence->flags);
+}
+
+static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
+{
+	struct dma_fence_array *array;
+	struct dma_fence *user_fence;
+	int i;
+
+	array = container_of(base_fence, struct dma_fence_array, base);
+	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
+		test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
+		for (i = 0; i < array->num_fences; i++) {
+			user_fence = array->fences[i];
+			if (user_fence) {
+				dma_fence_get(user_fence);
+				sde_fence_dump(user_fence);
+				dma_fence_put(user_fence);
+			}
+		}
+	}
+}
+
 signed long sde_sync_wait(void *fnc, long timeout_ms)
 {
 	struct dma_fence *fence = fnc;
 	int rc, status = 0;
-	char timeline_str[TIMELINE_VAL_LENGTH];
 
 	if (!fence)
 		return -EINVAL;
@@ -39,10 +573,6 @@ signed long sde_sync_wait(void *fnc, long timeout_ms)
 	rc = dma_fence_wait_timeout(fence, true,
 				msecs_to_jiffies(timeout_ms));
 	if (!rc || (rc == -EINVAL) || fence->error) {
-		if (fence->ops->timeline_value_str)
-			fence->ops->timeline_value_str(fence,
-					timeline_str, TIMELINE_VAL_LENGTH);
-
 		status = dma_fence_get_status(fence);
 		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
 			if (status == -EINVAL) {
@@ -51,23 +581,11 @@ signed long sde_sync_wait(void *fnc, long timeout_ms)
 			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
 				SDE_INFO("spec fence status:%d\n", status);
 			} else {
-				SDE_ERROR(
-					"fence driver name:%s timeline name:%s signaled:0x%x status:%d flags:0x%x rc:%d\n",
-					fence->ops->get_driver_name(fence),
-					fence->ops->get_timeline_name(fence),
-					fence->ops->signaled ?
-					fence->ops->signaled(fence) : 0xffffffff,
-					status, fence->flags, rc);
+				sde_fence_dump(fence);
+				sde_fence_dump_user_fds_info(fence);
 			}
 		} else {
-			SDE_ERROR(
-				"fence driver name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d\n",
-				fence->ops->get_driver_name(fence),
-				fence->ops->get_timeline_name(fence),
-				fence->seqno, timeline_str,
-				fence->ops->signaled ?
-				fence->ops->signaled(fence) : 0xffffffff,
-				status);
+			sde_fence_dump(fence);
 		}
 	}
 
@@ -94,21 +612,6 @@ uint32_t sde_sync_get_name_prefix(void *fence)
 	return prefix;
 }
 
-/**
- * struct sde_fence - release/retire fence structure
- * @fence: base fence structure
- * @name: name of each fence- it is fence timeline + commit_count
- * @fence_list: list to associated this fence on timeline/context
- * @fd: fd attached to this fence - debugging purpose.
- */
-struct sde_fence {
-	struct dma_fence base;
-	struct sde_fence_context *ctx;
-	char name[SDE_FENCE_NAME_SIZE];
-	struct list_head	fence_list;
-	int fd;
-};
-
 static void sde_fence_destroy(struct kref *kref)
 {
 	struct sde_fence_context *ctx;
@@ -163,6 +666,11 @@ static void sde_fence_release(struct dma_fence *fence)
 
 	if (fence) {
 		f = to_sde_fence(fence);
+
+		/* Delete the HW fence */
+		if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
+			_sde_hw_fence_release(f);
+
 		kref_put(&f->ctx->kref, sde_fence_destroy);
 		kfree(f);
 	}
@@ -205,7 +713,7 @@ static struct dma_fence_ops sde_fence_ops = {
  * @val: Timeline value at which to signal the fence
  * Return: File descriptor on success, or error code on error
  */
-static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
+static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
 {
 	struct sde_fence *sde_fence;
 	struct sync_file *sync_file;
@@ -247,6 +755,10 @@ static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
 		goto exit;
 	}
 
+	/* If ctl_id is valid, try to create a hw-fence */
+	if (hw_ctl)
+		sde_fence_create_hw_fence(hw_ctl, sde_fence);
+
 	fd_install(fd, sync_file->file);
 	sde_fence->fd = fd;
 
@@ -343,7 +855,7 @@ end:
 }
 
 int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
-							uint32_t offset)
+				uint32_t offset, struct sde_hw_ctl *hw_ctl)
 {
 	uint32_t trigger_value;
 	int fd, rc = -EINVAL;
@@ -367,19 +879,19 @@ int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
 	trigger_value = ctx->commit_count + offset;
 	spin_unlock_irqrestore(&ctx->lock, flags);
 
-	fd = _sde_fence_create_fd(ctx, trigger_value);
+	fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
 	*val = fd;
 	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
 			fd, trigger_value, ctx->commit_count, offset);
 
-	SDE_EVT32(ctx->drm_id, trigger_value, fd);
+	SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);
 	rc = (fd >= 0) ? 0 : fd;
 
 	return rc;
 }
 
 void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
-		enum sde_fence_event fence_event)
+		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
 {
 	unsigned long flags;
 
@@ -390,10 +902,15 @@ void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
 
 	spin_lock_irqsave(&ctx->lock, flags);
 	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
+		/* reset hw-fences without error */
+		if (hw_ctl)
+			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
+				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
+
 		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
 			SDE_DEBUG(
-			  "timeline reset attempt! done count:%d commit:%d\n",
-				ctx->done_count, ctx->commit_count);
+			  "timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
+				ctx->drm_id, ctx->done_count, ctx->commit_count);
 			ctx->done_count = ctx->commit_count;
 			SDE_EVT32(ctx->drm_id, ctx->done_count,
 				ctx->commit_count, ktime_to_us(ts),

+ 98 - 2
msm/sde/sde_fence.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  */
 
@@ -9,13 +10,21 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/soc/qcom/msm_hw_fence.h>
 
 #ifndef CHAR_BIT
 #define CHAR_BIT 8 /* define this if limits.h not available */
 #endif
 
+#define HW_FENCE_TRIGGER_SEL_CTRL_DONE       0x0
+#define HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT 0x1
+
+#define SDE_INPUT_HW_FENCE_TIMESTAMP         BIT(0)
+#define SDE_OUTPUT_HW_FENCE_TIMESTAMP        BIT(1)
+
 #define SDE_FENCE_NAME_SIZE	24
 
+#define MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG 2
 /**
  * struct sde_fence_context - release/retire fence context/timeline structure
  * @commit_count: Number of detected commits since bootup
@@ -52,6 +61,32 @@ enum sde_fence_event {
 	SDE_FENCE_SIGNAL_ERROR
 };
 
+/**
+ * struct sde_hw_fence_data - per display-client hw-fence info used to talk to the fence controller
+ * @client_id: client_id enum for the display driver.
+ * @hw_fence_client_id: client_id enum for the hw-fence driver.
+ * @hw_fence_handle: handle for this client returned by the hw-fence driver.
+ * @mem_descriptor: memory descriptor with the hfi for the rx/tx queues mapping.
+ * @ipcc_in_client: ipcc client triggering the signal: IN_CLIENT (APPS) -> DPU
+ * @ipcc_in_signal: ipcc signal triggered from client to dpu: IN_SIGNAL (APPS) -> DPU
+ * @ipcc_out_signal_pp: output signal from dpu to fctl, ping-pongs between two signals
+ * @ipcc_out_signal_pp_idx: index of the output signal ping-pong
+ * @ipcc_out_client: destination client id (APPS for the FCTL)
+ * @ipcc_this_client: ipcc dpu client id (For Waipio: APPS, For Kailua: DPU HW)
+ */
+struct sde_hw_fence_data {
+	int client_id;
+	enum hw_fence_client_id hw_fence_client_id;
+	void *hw_fence_handle;
+	struct msm_hw_fence_mem_addr mem_descriptor;
+	u32 ipcc_in_client;
+	u32 ipcc_in_signal;
+	u32 ipcc_out_signal_pp[MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG];
+	u32 ipcc_out_signal_pp_idx;
+	u32 ipcc_out_client;
+	u32 ipcc_this_client;
+};
+
 #if IS_ENABLED(CONFIG_SYNC_FILE)
 /**
  * sde_sync_get - Query sync fence object from a file handle
@@ -89,6 +124,7 @@ signed long sde_sync_wait(void *fence, long timeout_ms);
 
 /**
  * sde_sync_get_name_prefix - get integer representation of fence name prefix
+ *
  * @fence: Pointer to opaque fence structure
  *
  * Return: 32-bit integer containing first 4 characters of fence name,
@@ -98,13 +134,71 @@ uint32_t sde_sync_get_name_prefix(void *fence);
 
 /**
  * sde_fence_init - initialize fence object
+ *
  * @drm_id: ID number of owning DRM Object
  * @name: Timeline name
+ *
  * Returns: fence context object on success
  */
 struct sde_fence_context *sde_fence_init(const char *name,
 		uint32_t drm_id);
 
+/**
+ * sde_hw_fence_init - initialize hw-fence clients
+ *
+ * @hw_ctl: hw ctl client to init.
+ * @use_dpu_ipcc: boolean to indicate if hw should use dpu ipcc signals.
+ *
+ * Returns: Zero on success, otherwise returns an error code.
+ */
+int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc);
+
+/**
+ * sde_hw_fence_deinit - deinitialize hw-fence clients
+ *
+ * @hw_ctl: hw ctl client to deinit.
+ */
+void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl);
+
+/**
+ * sde_fence_register_hw_fences_wait - registers dpu-client for wait on hw fence or fences
+ *
+ * @hw_ctl: hw ctl client used to register for wait.
+ * @fences: list of dma-fences that have hw-fence support to wait-on
+ * @num_fences: number of fences in the above list
+ *
+ * Returns: Zero on success, otherwise returns an error code.
+ */
+int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
+	u32 num_fences);
+
+/**
+ * sde_fence_update_hw_fences_txq - updates the hw-fence txq with the list of hw-fences to signal
+ *                                  upon triggering the ipcc signal.
+ *
+ * @ctx: sde fence context
+ * @vid_mode: is video-mode update
+ * @line_count: prog line count value, must be non-zero
+ *
+ * @debugfs_hw_fence: hw-fence timestamp debugfs value
+ */
+int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
+	u32 debugfs_hw_fence);
+
+/**
+ * sde_fence_update_input_hw_fence_signal - updates input-fence ipcc signal in dpu and enables
+ *                                  hw-fences for the ctl.
+ *
+ * @ctl: hw ctl to update the input-fence and enable hw-fences
+ * @debugfs_hw_fence: hw-fence timestamp debugfs value
+ * @hw_mdp: pointer to hw_mdp to get timestamp registers
+ * @disable: bool to indicate if we should disable hw-fencing for this commit
+ *
+ * Returns: Zero on success, otherwise returns an error code.
+ */
+int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *ctl, u32 debugfs_hw_fence,
+	struct sde_hw_mdp *hw_mdp, bool disable);
+
 /**
  * sde_fence_deinit - deinit fence container
  * @fence: Pointer fence container
@@ -122,19 +216,21 @@ void sde_fence_prepare(struct sde_fence_context *fence);
  * @fence: Pointer fence container
  * @val: Pointer to output value variable, fence fd will be placed here
  * @offset: Fence signal commit offset, e.g., +1 to signal on next commit
+ * @hw_ctl: Ctl for hw fences
  * Returns: Zero on success
  */
 int sde_fence_create(struct sde_fence_context *fence, uint64_t *val,
-							uint32_t offset);
+				uint32_t offset, struct sde_hw_ctl *hw_ctl);
 
 /**
  * sde_fence_signal - advance fence timeline to signal outstanding fences
  * @fence: Pointer fence container
  * @ts: fence timestamp
  * @fence_event: fence event to indicate nature of fence signal.
+ * @hw_ctl: ctl to signal fences for the timeline reset event
  */
 void sde_fence_signal(struct sde_fence_context *fence, ktime_t ts,
-		enum sde_fence_event fence_event);
+		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl);
 
 /**
  * sde_fence_timeline_status - prints fence timeline status

+ 227 - 289
msm/sde/sde_hw_catalog.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -155,7 +155,8 @@
 #define SDE_UIDLE_FAL10_TARGET_IDLE 50
 #define SDE_UIDLE_FAL1_TARGET_IDLE 40
 #define SDE_UIDLE_FAL1_MAX_THRESHOLD 15
-#define SDE_UIDLE_REV102_FAL1_MAX_THRESHOLD 255
+#define SDE_UIDLE_FAL1_MAX_THRESHOLD_EXT_REV_102 255
+#define SDE_UIDLE_FAL1_MAX_THRESHOLD_EXT_REV_103 255
 #define SDE_UIDLE_FAL10_THRESHOLD_60 12
 #define SDE_UIDLE_FAL10_THRESHOLD_90 13
 #define SDE_UIDLE_MAX_DWNSCALE 1500
@@ -177,6 +178,7 @@
  *************************************************************/
 enum {
 	SDE_HW_VERSION,
+	SDE_HW_FENCE_VERSION,
 	SDE_HW_PROP_MAX,
 };
 
@@ -220,6 +222,8 @@ enum sde_prop {
 	TRUSTED_VM_ENV,
 	MAX_TRUSTED_VM_DISPLAYS,
 	TVM_INCLUDE_REG,
+	IPCC_PROTOCOL_ID,
+	SDE_EMULATED_ENV,
 	SDE_PROP_MAX,
 };
 
@@ -270,7 +274,6 @@ enum {
 	SSPP_CLK_STATUS,
 	SSPP_SCALE_SIZE,
 	SSPP_VIG_BLOCKS,
-	SSPP_RGB_BLOCKS,
 	SSPP_DMA_BLOCKS,
 	SSPP_EXCL_RECT,
 	SSPP_SMART_DMA,
@@ -298,13 +301,6 @@ enum {
 	VIG_PROP_MAX,
 };
 
-enum {
-	RGB_SCALER_OFF,
-	RGB_SCALER_LEN,
-	RGB_PCC_PROP,
-	RGB_PROP_MAX,
-};
-
 enum {
 	DMA_SUBBLOCK_INDEX,
 	DMA_TOP_OFF,
@@ -439,6 +435,7 @@ enum {
 	RC_LEN,
 	RC_VERSION,
 	RC_MEM_TOTAL_SIZE,
+	RC_MIN_REGION_WIDTH,
 	RC_PROP_MAX,
 };
 
@@ -501,6 +498,8 @@ enum {
 	VBIF_QOS_NRT_REMAP,
 	VBIF_QOS_CWB_REMAP,
 	VBIF_QOS_LUTDMA_REMAP,
+	VBIF_QOS_CNOC_REMAP,
+	VBIF_QOS_OFFLINE_WB_REMAP,
 	VBIF_PROP_MAX,
 };
 
@@ -578,6 +577,7 @@ struct sde_dt_props {
  *************************************************************/
 static struct sde_prop_type sde_hw_prop[] = {
 	{SDE_HW_VERSION, "qcom,sde-hw-version", false, PROP_TYPE_U32},
+	{SDE_HW_FENCE_VERSION, "qcom,hw-fence-sw-version", false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type sde_prop[] = {
@@ -622,6 +622,8 @@ static struct sde_prop_type sde_prop[] = {
 	{MAX_TRUSTED_VM_DISPLAYS, "qcom,sde-max-trusted-vm-displays", false,
 			PROP_TYPE_U32},
 	{TVM_INCLUDE_REG, "qcom,tvm-include-reg", false, PROP_TYPE_U32_ARRAY},
+	{IPCC_PROTOCOL_ID, "qcom,sde-ipcc-protocol-id", false, PROP_TYPE_U32},
+	{SDE_EMULATED_ENV, "qcom,sde-emulated-env", false, PROP_TYPE_BOOL},
 };
 
 static struct sde_prop_type sde_perf_prop[] = {
@@ -688,7 +690,6 @@ static struct sde_prop_type sspp_prop[] = {
 		PROP_TYPE_BIT_OFFSET_ARRAY},
 	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
 	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
-	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
 	{SSPP_DMA_BLOCKS, "qcom,sde-sspp-dma-blocks", false, PROP_TYPE_NODE},
 	{SSPP_EXCL_RECT, "qcom,sde-sspp-excl-rect", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SMART_DMA, "qcom,sde-sspp-smart-dma-priority", false,
@@ -732,12 +733,6 @@ static struct sde_prop_type vig_prop[] = {
 			false, PROP_TYPE_U32_ARRAY},
 };
 
-static struct sde_prop_type rgb_prop[] = {
-	{RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
-	{RGB_SCALER_LEN, "qcom,sde-rgb-scaler-size", false, PROP_TYPE_U32},
-	{RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
-};
-
 static struct sde_prop_type dma_prop[] = {
 	[DMA_SUBBLOCK_INDEX] = {DMA_SUBBLOCK_INDEX, "cell-index", false,
 			PROP_TYPE_U32},
@@ -831,6 +826,7 @@ static struct sde_prop_type rc_prop[] = {
 	{RC_LEN, "qcom,sde-dspp-rc-size", false, PROP_TYPE_U32},
 	{RC_VERSION, "qcom,sde-dspp-rc-version", false, PROP_TYPE_U32},
 	{RC_MEM_TOTAL_SIZE, "qcom,sde-dspp-rc-mem-size", false, PROP_TYPE_U32},
+	{RC_MIN_REGION_WIDTH, "qcom,sde-dspp-rc-min-region-width", false, PROP_TYPE_U32},
 };
 
 static struct sde_prop_type spr_prop[] = {
@@ -931,23 +927,18 @@ static struct sde_prop_type vbif_prop[] = {
 	{VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
 	{VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
 	{VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
-		PROP_TYPE_U32},
-	{VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
-		PROP_TYPE_U32},
-	{VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
-		PROP_TYPE_U32_ARRAY},
+	{VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false, PROP_TYPE_U32},
+	{VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false, PROP_TYPE_U32},
+	{VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false, PROP_TYPE_U32_ARRAY},
 	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
 	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_CWB_REMAP, "qcom,sde-vbif-qos-cwb-remap", false,
-		PROP_TYPE_U32_ARRAY},
-	{VBIF_QOS_LUTDMA_REMAP, "qcom,sde-vbif-qos-lutdma-remap", false,
+	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_CWB_REMAP, "qcom,sde-vbif-qos-cwb-remap", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_LUTDMA_REMAP, "qcom,sde-vbif-qos-lutdma-remap", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_CNOC_REMAP, "qcom,sde-vbif-qos-cnoc-remap", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_OFFLINE_WB_REMAP, "qcom,sde-vbif-qos-offline-wb-remap", false,
 		PROP_TYPE_U32_ARRAY},
 };
 
@@ -1661,103 +1652,6 @@ end:
 	return rc;
 }
 
-static void _sde_sspp_setup_rgbs_pp(struct sde_dt_props *props,
-		struct sde_mdss_cfg *sde_cfg, struct sde_sspp_cfg *sspp)
-{
-	struct sde_sspp_sub_blks *sblk = sspp->sblk;
-
-	sblk->pcc_blk.id = SDE_SSPP_PCC;
-	if (props->exists[RGB_PCC_PROP]) {
-		sblk->pcc_blk.base = PROP_VALUE_ACCESS(props->values,
-			RGB_PCC_PROP, 0);
-		sblk->pcc_blk.version = PROP_VALUE_ACCESS(props->values,
-			RGB_PCC_PROP, 1);
-		sblk->pcc_blk.len = 0;
-		set_bit(SDE_SSPP_PCC, &sspp->features);
-	}
-}
-
-static int _sde_sspp_setup_rgbs(struct device_node *np,
-		struct sde_mdss_cfg *sde_cfg)
-{
-	int i;
-	struct sde_dt_props *props;
-	struct device_node *snp = NULL;
-	int rgb_count = 0;
-	const char *type;
-
-	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
-	if (!snp)
-		return 0;
-
-	props = sde_get_dt_props(snp, RGB_PROP_MAX, rgb_prop,
-			ARRAY_SIZE(rgb_prop), NULL);
-	if (IS_ERR(props))
-		return PTR_ERR(props);
-
-	for (i = 0; i < sde_cfg->sspp_count; ++i) {
-		struct sde_sspp_cfg *sspp = sde_cfg->sspp + i;
-		struct sde_sspp_sub_blks *sblk = sspp->sblk;
-
-		of_property_read_string_index(np,
-				sspp_prop[SSPP_TYPE].prop_name, i, &type);
-		if (strcmp(type, "rgb"))
-			continue;
-
-		sblk->maxupscale = MAX_UPSCALE_RATIO;
-		sblk->maxdwnscale = MAX_DOWNSCALE_RATIO;
-		sspp->id = SSPP_RGB0 + rgb_count;
-		snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-				sspp->id - SSPP_VIG0);
-		sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + rgb_count;
-		sspp->type = SSPP_TYPE_RGB;
-		set_bit(SDE_PERF_SSPP_QOS, &sspp->perf_features);
-		if (sde_cfg->vbif_qos_nlvl == 8)
-			set_bit(SDE_PERF_SSPP_QOS_8LVL, &sspp->perf_features);
-		rgb_count++;
-
-		if ((sde_cfg->qseed_sw_lib_rev == SDE_SSPP_SCALER_QSEED2) ||
-		    (sde_cfg->qseed_sw_lib_rev == SDE_SSPP_SCALER_QSEED3)) {
-			set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
-			sblk->scaler_blk.id = sde_cfg->qseed_sw_lib_rev;
-			sblk->scaler_blk.base = PROP_VALUE_ACCESS(props->values,
-					RGB_SCALER_OFF, 0);
-			sblk->scaler_blk.len = PROP_VALUE_ACCESS(props->values,
-					RGB_SCALER_LEN, 0);
-			snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-				"sspp_scaler%u", sspp->id - SSPP_VIG0);
-		}
-
-		_sde_sspp_setup_rgbs_pp(props, sde_cfg, sspp);
-
-		sblk->format_list = sde_cfg->dma_formats;
-		sblk->virt_format_list = NULL;
-	}
-
-	sde_put_dt_props(props);
-	return 0;
-}
-
-static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
-	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
-	struct sde_prop_value *prop_value, u32 *cursor_count)
-{
-	if (!IS_SDE_MAJOR_MINOR_SAME(sde_cfg->hw_rev, SDE_HW_VER_300))
-		SDE_ERROR("invalid sspp type %d, xin id %d\n",
-				sspp->type, sspp->xin_id);
-	set_bit(SDE_SSPP_CURSOR, &sspp->features);
-	sblk->maxupscale = SSPP_UNITY_SCALE;
-	sblk->maxdwnscale = SSPP_UNITY_SCALE;
-	sblk->format_list = sde_cfg->cursor_formats;
-	sblk->virt_format_list = NULL;
-	sspp->id = SSPP_CURSOR0 + *cursor_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
-			sspp->id - SSPP_VIG0);
-	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
-	sspp->type = SSPP_TYPE_CURSOR;
-	(*cursor_count)++;
-}
-
 static void _sde_sspp_setup_dgm(struct sde_sspp_cfg *sspp,
 		const struct sde_dt_props *props, const char *name,
 		struct sde_pp_blk *blk, u32 type, u32 prop, bool versioned)
@@ -1932,6 +1826,8 @@ static void sde_sspp_set_features(struct sde_mdss_cfg *sde_cfg,
 
 		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
 
+		if (sde_cfg->has_line_insertion)
+			set_bit(SDE_SSPP_LINE_INSERTION, &sspp->features);
 		sblk->smart_dma_priority =
 			PROP_VALUE_ACCESS(props->values, SSPP_SMART_DMA, i);
 		if (sblk->smart_dma_priority && sde_cfg->smart_dma_rev)
@@ -1951,10 +1847,13 @@ static void sde_sspp_set_features(struct sde_mdss_cfg *sde_cfg,
 					&sspp->perf_features);
 		}
 
-		if (sde_cfg->uidle_cfg.uidle_rev)
+		if (sde_cfg->uidle_cfg.uidle_rev) {
 			set_bit(SDE_PERF_SSPP_UIDLE, &sspp->perf_features);
+			if (sde_cfg->uidle_cfg.uidle_rev >= SDE_UIDLE_VERSION_1_0_3)
+				set_bit(SDE_PERF_SSPP_UIDLE_FILL_LVL_SCALE, &sspp->perf_features);
+		}
 
-		if (sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache)
+		if (test_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map))
 			set_bit(SDE_PERF_SSPP_SYS_CACHE, &sspp->perf_features);
 
 		if (test_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features))
@@ -1988,6 +1887,9 @@ static void sde_sspp_set_features(struct sde_mdss_cfg *sde_cfg,
 
 		if (test_bit(SDE_FEATURE_UBWC_STATS, sde_cfg->features))
 			set_bit(SDE_SSPP_UBWC_STATS, &sspp->features);
+
+		if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900))
+			set_bit(SDE_SSPP_SCALER_DE_LPF_BLEND, &sspp->features);
 	}
 }
 
@@ -1996,10 +1898,8 @@ static int _sde_sspp_setup_cmn(struct device_node *np,
 {
 	int rc = 0, off_count, i, j;
 	struct sde_dt_props *props;
-	const char *type;
 	struct sde_sspp_cfg *sspp;
 	struct sde_sspp_sub_blks *sblk;
-	u32 cursor_count = 0;
 
 	props = sde_get_dt_props(np, SSPP_PROP_MAX, sspp_prop,
 			ARRAY_SIZE(sspp_prop), &off_count);
@@ -2034,14 +1934,6 @@ static int _sde_sspp_setup_cmn(struct device_node *np,
 		sspp->base = PROP_VALUE_ACCESS(props->values, SSPP_OFF, i);
 		sspp->len = PROP_VALUE_ACCESS(props->values, SSPP_SIZE, 0);
 
-		of_property_read_string_index(np,
-				sspp_prop[SSPP_TYPE].prop_name, i, &type);
-		if (!strcmp(type, "cursor")) {
-			/* No prop values for cursor pipes */
-			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
-					&cursor_count);
-		}
-
 		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
 				sspp->id - SSPP_VIG0);
 
@@ -2097,10 +1989,6 @@ static int sde_sspp_parse_dt(struct device_node *np,
 	if (rc)
 		return rc;
 
-	rc = _sde_sspp_setup_rgbs(np, sde_cfg);
-	if (rc)
-		return rc;
-
 	rc = _sde_sspp_setup_dmas(np, sde_cfg);
 
 	return rc;
@@ -2157,10 +2045,10 @@ static int sde_ctl_parse_dt(struct device_node *np,
 	return 0;
 }
 
-void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
+u32 sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
 		uint32_t disp_type)
 {
-	u32 i, cnt = 0, sec_cnt = 0;
+	u32 i, cnt = 0, sec_cnt = 0, lm_mask = 0;
 
 	if (disp_type == SDE_CONNECTOR_PRIMARY) {
 		for (i = 0; i < sde_cfg->mixer_count; i++) {
@@ -2179,6 +2067,7 @@ void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
 			if (cnt < num_lm) {
 				set_bit(SDE_DISP_PRIMARY_PREF,
 						&sde_cfg->mixer[i].features);
+				lm_mask |=  BIT(sde_cfg->mixer[i].id - 1);
 				cnt++;
 			}
 
@@ -2217,10 +2106,13 @@ void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
 					BIT(SDE_DISP_PRIMARY_PREF))) {
 				set_bit(SDE_DISP_SECONDARY_PREF,
 						&sde_cfg->mixer[i].features);
+				lm_mask |= BIT(sde_cfg->mixer[i].id - 1);
 				cnt++;
 			}
 		}
 	}
+
+	return lm_mask;
 }
 
 static int sde_mixer_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
@@ -2486,6 +2378,7 @@ static int sde_intf_parse_dt(struct device_node *np,
 
 		if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900)) {
 			set_bit(SDE_INTF_MDP_VSYNC_TS, &intf->features);
+			set_bit(SDE_INTF_WD_JITTER, &intf->features);
 		}
 	}
 
@@ -2596,7 +2489,10 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
 			set_bit(SDE_WB_HAS_DCWB, &wb->features);
 			if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
 				set_bit(SDE_WB_DCWB_CTRL, &wb->features);
-			if (major_version >= SDE_HW_MAJOR(SDE_HW_VER_810)) {
+			if (major_version >= SDE_HW_MAJOR(SDE_HW_VER_900)) {
+				sde_cfg->cwb_blk_off = 0x67200;
+				sde_cfg->cwb_blk_stride = 0x400;
+			} else if (major_version >= SDE_HW_MAJOR(SDE_HW_VER_810)) {
 				sde_cfg->cwb_blk_off = 0x66A00;
 				sde_cfg->cwb_blk_stride = 0x400;
 			} else {
@@ -2905,6 +2801,13 @@ static int _sde_rc_parse_dt(struct device_node *np,
 					RC_VERSION, 0);
 			sblk->rc.mem_total_size = PROP_VALUE_ACCESS(
 					props->values, RC_MEM_TOTAL_SIZE, 0);
+
+			if (!props->exists[RC_MIN_REGION_WIDTH])
+				sblk->rc.min_region_width = 4;
+			else
+				sblk->rc.min_region_width = PROP_VALUE_ACCESS(
+						props->values, RC_MIN_REGION_WIDTH, 0);
+
 			sblk->rc.idx = i;
 			set_bit(SDE_DSPP_RC, &dspp->features);
 		}
@@ -3204,6 +3107,10 @@ static int sde_ds_parse_dt(struct device_node *np,
 		else if (sde_cfg->qseed_sw_lib_rev ==
 				SDE_SSPP_SCALER_QSEED3LITE)
 			set_bit(SDE_SSPP_SCALER_QSEED3LITE, &ds->features);
+		if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900)) {
+			set_bit(SDE_DS_DE_LPF_BLEND, &ds->features);
+			set_bit(SDE_DS_MERGE_CTRL, &ds->features);
+		}
 	}
 
 end:
@@ -3301,6 +3208,8 @@ static int sde_dsc_parse_dt(struct device_node *np,
 						&dsc->features);
 			if (SDE_HW_MAJOR(sde_cfg->hw_rev) >= SDE_HW_MAJOR(SDE_HW_VER_900))
 				set_bit(SDE_DSC_4HS, &dsc->features);
+			if (sde_cfg->has_reduced_ob_max)
+				set_bit(SDE_DSC_REDUCED_OB_MAX, &dsc->features);
 		} else {
 			set_bit(SDE_DSC_HW_REV_1_1, &dsc->features);
 		}
@@ -3398,7 +3307,7 @@ end:
 static int sde_cdm_parse_dt(struct device_node *np,
 				struct sde_mdss_cfg *sde_cfg)
 {
-	int rc, prop_count[HW_PROP_MAX], i;
+	int rc, prop_count[HW_PROP_MAX], i, j;
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[HW_PROP_MAX];
 	u32 off_count;
@@ -3410,15 +3319,13 @@ static int sde_cdm_parse_dt(struct device_node *np,
 		goto end;
 	}
 
-	prop_value = kzalloc(HW_PROP_MAX *
-			sizeof(struct sde_prop_value), GFP_KERNEL);
+	prop_value = kzalloc(HW_PROP_MAX * sizeof(struct sde_prop_value), GFP_KERNEL);
 	if (!prop_value) {
 		rc = -ENOMEM;
 		goto end;
 	}
 
-	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
-		&off_count);
+	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count, &off_count);
 	if (rc)
 		goto end;
 
@@ -3433,13 +3340,13 @@ static int sde_cdm_parse_dt(struct device_node *np,
 		cdm = sde_cfg->cdm + i;
 		cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
 		cdm->id = CDM_0 + i;
-		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
-				cdm->id - CDM_0);
+		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", cdm->id - CDM_0);
 		cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
 
-		/* intf3 and wb2 for cdm block */
-		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
-		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
+		/* intf3 and wb(s) for cdm block */
+		for (j = 0; j < sde_cfg->wb_count; j++)
+			cdm->wb_connect |= BIT(sde_cfg->wb[j].id);
+		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : 0;
 
 		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
 			set_bit(SDE_CDM_INPUT_CTRL, &cdm->features);
@@ -3476,6 +3383,12 @@ static int sde_dnsc_blur_parse_dt(struct device_node *np, struct sde_mdss_cfg *s
 	if (rc)
 		goto end;
 
+	if (off_count > DNSC_BLUR_MAX_COUNT) {
+		SDE_ERROR("invalid dnsc_blur block count:%d\n", off_count);
+		rc = -EINVAL;
+		goto end;
+	}
+
 	sde_cfg->dnsc_blur_count = off_count;
 
 	rc = _read_dt_entry(np, dnsc_blur_prop, ARRAY_SIZE(dnsc_blur_prop), prop_count,
@@ -3588,6 +3501,19 @@ static int sde_cache_parse_dt(struct device_node *np,
 	struct llcc_slice_desc *slice;
 	struct device_node *llcc_node;
 	int i;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+	const u32 sde_sys_cache_usecase_id[SDE_SYS_CACHE_MAX] = {
+		[SDE_SYS_CACHE_DISP] = LLCC_DISP,
+		[SDE_SYS_CACHE_DISP_1] = LLCC_DISP_1,
+		[SDE_SYS_CACHE_DISP_WB] = LLCC_DISP_WB,
+	};
+#else
+	const u32 sde_sys_cache_usecase_id[SDE_SYS_CACHE_MAX] = {
+		[SDE_SYS_CACHE_DISP] = LLCC_DISP,
+		[SDE_SYS_CACHE_DISP_1] = 0,
+		[SDE_SYS_CACHE_DISP_WB] = 0,
+	};
+#endif
 
 	if (!sde_cfg) {
 		SDE_ERROR("invalid argument\n");
@@ -3604,26 +3530,15 @@ static int sde_cache_parse_dt(struct device_node *np,
 		struct sde_sc_cfg *sc_cfg = &sde_cfg->sc_cfg[i];
 		u32 usecase_id = 0;
 
-		if (!sc_cfg->has_sys_cache)
+		if (!test_bit(i, sde_cfg->sde_sys_cache_type_map))
 			continue;
 
-		switch (i) {
-		case SDE_SYS_CACHE_DISP:
-			usecase_id = LLCC_DISP;
-			break;
-
-		case SDE_SYS_CACHE_DISP_WB:
-			usecase_id = LLCC_DISP;
-			break;
-
-		default:
-			usecase_id = 0;
-			SDE_DEBUG("invalid sys cache:%d\n", i);
-			break;
-		}
-
-		if (!usecase_id)
+		usecase_id = sde_sys_cache_usecase_id[i];
+		if (!usecase_id) {
+			clear_bit(i, sde_cfg->sde_sys_cache_type_map);
+			SDE_DEBUG("invalid usecase-id for sys cache:%d\n", i);
 			continue;
+		}
 
 		slice = llcc_slice_getd(usecase_id);
 		if (IS_ERR_OR_NULL(slice)) {
@@ -3631,6 +3546,7 @@ static int sde_cache_parse_dt(struct device_node *np,
 			return -EINVAL;
 		}
 
+		sc_cfg->llcc_uid = usecase_id;
 		sc_cfg->llcc_scid = llcc_get_slice_id(slice);
 		sc_cfg->llcc_slice_size = llcc_get_slice_size(slice);
 		SDE_DEBUG("img cache:%d usecase_id:%d, scid:%d slice_size:%zu kb\n",
@@ -3715,42 +3631,39 @@ static int _sde_vbif_populate_ot_parsing(struct sde_vbif_cfg *vbif,
 }
 
 static int _sde_vbif_populate_qos_parsing(struct sde_mdss_cfg *sde_cfg,
-	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value,
-	int *prop_count)
+	struct sde_vbif_cfg *vbif, struct sde_prop_value *prop_value, int *prop_count)
 {
-	int i, j;
-	int prop_index = VBIF_QOS_RT_REMAP;
-
-	for (i = VBIF_RT_CLIENT;
-			((i < VBIF_MAX_CLIENT) && (prop_index < VBIF_PROP_MAX));
-				i++, prop_index++) {
-		vbif->qos_tbl[i].npriority_lvl = prop_count[prop_index];
-		SDE_DEBUG("qos_tbl[%d].npriority_lvl=%u\n",
-				i, vbif->qos_tbl[i].npriority_lvl);
-
-		if (vbif->qos_tbl[i].npriority_lvl == sde_cfg->vbif_qos_nlvl) {
-			vbif->qos_tbl[i].priority_lvl = kcalloc(
-					vbif->qos_tbl[i].npriority_lvl,
-					sizeof(u32), GFP_KERNEL);
-			if (!vbif->qos_tbl[i].priority_lvl)
+	int i, j, prop_index = VBIF_QOS_RT_REMAP;
+	u32 entries;
+
+	for (i = VBIF_RT_CLIENT; ((i < VBIF_MAX_CLIENT) && (prop_index < VBIF_PROP_MAX));
+						i++, prop_index++) {
+		vbif->qos_tbl[i].count = prop_count[prop_index];
+		SDE_DEBUG("qos_tbl[%d].count=%u\n", i, vbif->qos_tbl[i].count);
+
+		entries = 2 * sde_cfg->vbif_qos_nlvl;
+		if (vbif->qos_tbl[i].count == entries) {
+			vbif->qos_tbl[i].priority_lvl = kcalloc(entries, sizeof(u32), GFP_KERNEL);
+			if (!vbif->qos_tbl[i].priority_lvl) {
+				vbif->qos_tbl[i].count = 0;
 				return -ENOMEM;
-		} else if (vbif->qos_tbl[i].npriority_lvl) {
-			vbif->qos_tbl[i].npriority_lvl = 0;
+			}
+		} else if (vbif->qos_tbl[i].count) {
+			vbif->qos_tbl[i].count = 0;
 			vbif->qos_tbl[i].priority_lvl = NULL;
-			SDE_ERROR("invalid qos table for client:%d, prop:%d\n",
-					i, prop_index);
+			SDE_ERROR("invalid qos table for client:%d, prop:%d\n", i, prop_index);
+			continue;
 		}
 
-		for (j = 0; j < vbif->qos_tbl[i].npriority_lvl; j++) {
+		for (j = 0; j < vbif->qos_tbl[i].count; j++) {
 			vbif->qos_tbl[i].priority_lvl[j] =
-				PROP_VALUE_ACCESS(prop_value, prop_index, j);
-			SDE_DEBUG("client:%d, prop:%d, lvl[%d]=%u\n",
-					i, prop_index, j,
+					PROP_VALUE_ACCESS(prop_value, prop_index, j);
+			SDE_DEBUG("client:%d, prop:%d, lvl[%d]=%u\n", i, prop_index, j,
 					vbif->qos_tbl[i].priority_lvl[j]);
 		}
 
-		if (vbif->qos_tbl[i].npriority_lvl)
-			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
+		vbif->qos_tbl[i].count = entries;
+		set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
 	}
 
 	return 0;
@@ -3867,6 +3780,16 @@ static int sde_vbif_parse_dt(struct device_node *np,
 	if (rc)
 		goto end;
 
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_CNOC_REMAP], 1,
+			&prop_count[VBIF_QOS_CNOC_REMAP], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_OFFLINE_WB_REMAP], 1,
+			&prop_count[VBIF_QOS_OFFLINE_WB_REMAP], NULL);
+	if (rc)
+		goto end;
+
 	sde_cfg->vbif_count = off_count;
 
 	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
@@ -3966,6 +3889,7 @@ static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
 			if (test_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features))
 				sde_cfg->dcwb_count++;
 		}
+		pp->dcwb_id = (sde_cfg->dcwb_count > 0) ? sde_cfg->dcwb_count : DCWB_MAX;
 
 		if (major_version < SDE_HW_MAJOR(SDE_HW_VER_700)) {
 			sblk->dsc.base = PROP_VALUE_ACCESS(prop_value,
@@ -4047,8 +3971,8 @@ static void _sde_top_parse_dt_helper(struct sde_mdss_cfg *cfg,
 			DEFAULT_SDE_MIXER_BLENDSTAGES;
 
 	cfg->ubwc_rev = props->exists[UBWC_VERSION] ?
-			SDE_HW_UBWC_VER(PROP_VALUE_ACCESS(props->values,
-			UBWC_VERSION, 0)) : DEFAULT_SDE_UBWC_NONE;
+			PROP_VALUE_ACCESS(props->values,
+			UBWC_VERSION, 0) : DEFAULT_SDE_UBWC_NONE;
 
 	cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
 
@@ -4083,6 +4007,8 @@ static void _sde_top_parse_dt_helper(struct sde_mdss_cfg *cfg,
 	cfg->mdp[0].smart_panel_align_mode =
 		PROP_VALUE_ACCESS(props->values, SMART_PANEL_ALIGN_MODE, 0);
 
+	cfg->ipcc_protocol_id = PROP_VALUE_ACCESS(props->values, IPCC_PROTOCOL_ID, 0);
+
 	if (props->exists[SEC_SID_MASK]) {
 		cfg->sec_sid_mask_count = props->counts[SEC_SID_MASK];
 		for (i = 0; i < cfg->sec_sid_mask_count; i++)
@@ -4116,6 +4042,9 @@ static void _sde_top_parse_dt_helper(struct sde_mdss_cfg *cfg,
 						i * 2 + 1);
 		}
 	}
+
+	if (PROP_VALUE_ACCESS(props->values, SDE_EMULATED_ENV, 0))
+		set_bit(SDE_FEATURE_EMULATED_ENV, cfg->features);
 }
 
 static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
@@ -4293,15 +4222,18 @@ static int sde_parse_reg_dma_dt(struct device_node *np,
 	sde_cfg->dma_cfg.clk_ctrl = SDE_CLK_CTRL_LUTDMA;
 	sde_cfg->dma_cfg.vbif_idx = VBIF_RT;
 
-	for (i = 0; i < sde_cfg->mdp_count; i++) {
-		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].reg_off =
-			PROP_BITVALUE_ACCESS(prop_value,
-					REG_DMA_CLK_CTRL, 0, 0);
-		sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off =
-			PROP_BITVALUE_ACCESS(prop_value,
-					REG_DMA_CLK_CTRL, 0, 1);
+	if (test_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_cfg->features)) {
+		sde_cfg->dma_cfg.split_vbif_supported = true;
+	} else {
+		for (i = 0; i < sde_cfg->mdp_count; i++) {
+			sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].reg_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						REG_DMA_CLK_CTRL, 0, 0);
+			sde_cfg->mdp[i].clk_ctrls[sde_cfg->dma_cfg.clk_ctrl].bit_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						REG_DMA_CLK_CTRL, 0, 1);
+		}
 	}
-
 end:
 	kfree(prop_value);
 	/* reg dma is optional feature hence return 0 */
@@ -4349,33 +4281,29 @@ static int _sde_qos_parse_dt_cfg(struct sde_mdss_cfg *cfg, int *prop_count,
 	cfg->perf.qos_refresh_count = qos_count;
 
 	cfg->perf.danger_lut = kcalloc(qos_count,
-		sizeof(u64) * SDE_QOS_LUT_USAGE_MAX, GFP_KERNEL);
+		sizeof(u64) * SDE_QOS_LUT_USAGE_MAX * SDE_DANGER_SAFE_LUT_TYPE_MAX, GFP_KERNEL);
 	cfg->perf.safe_lut = kcalloc(qos_count,
-		sizeof(u64) * SDE_QOS_LUT_USAGE_MAX, GFP_KERNEL);
+		sizeof(u64) * SDE_QOS_LUT_USAGE_MAX * SDE_DANGER_SAFE_LUT_TYPE_MAX, GFP_KERNEL);
 	cfg->perf.creq_lut = kcalloc(qos_count,
 		sizeof(u64) * SDE_QOS_LUT_USAGE_MAX * SDE_CREQ_LUT_TYPE_MAX, GFP_KERNEL);
 	if (!cfg->perf.creq_lut || !cfg->perf.safe_lut || !cfg->perf.danger_lut)
 		goto end;
 
 	if (prop_exists[QOS_DANGER_LUT] &&
-	    prop_count[QOS_DANGER_LUT] >= (SDE_QOS_LUT_USAGE_MAX * qos_count)) {
+			(prop_count[QOS_DANGER_LUT] >=
+			    (SDE_QOS_LUT_USAGE_MAX * qos_count * SDE_DANGER_SAFE_LUT_TYPE_MAX))) {
 		for (i = 0; i < prop_count[QOS_DANGER_LUT]; i++) {
-			cfg->perf.danger_lut[i] =
-				PROP_VALUE_ACCESS(prop_value,
-						QOS_DANGER_LUT, i);
-			SDE_DEBUG("danger usage:%i lut:0x%llx\n",
-					i, cfg->perf.danger_lut[i]);
+			cfg->perf.danger_lut[i] = PROP_VALUE_ACCESS(prop_value, QOS_DANGER_LUT, i);
+			SDE_DEBUG("danger usage:%i lut:0x%llx\n", i, cfg->perf.danger_lut[i]);
 		}
 	}
 
 	if (prop_exists[QOS_SAFE_LUT] &&
-	    prop_count[QOS_SAFE_LUT] >= (SDE_QOS_LUT_USAGE_MAX * qos_count)) {
+			(prop_count[QOS_SAFE_LUT] >=
+			    (SDE_QOS_LUT_USAGE_MAX * qos_count * SDE_DANGER_SAFE_LUT_TYPE_MAX))) {
 		for (i = 0; i < prop_count[QOS_SAFE_LUT]; i++) {
-			cfg->perf.safe_lut[i] =
-				PROP_VALUE_ACCESS(prop_value,
-					QOS_SAFE_LUT, i);
-			SDE_DEBUG("safe usage:%d lut:0x%llx\n",
-				i, cfg->perf.safe_lut[i]);
+			cfg->perf.safe_lut[i] = PROP_VALUE_ACCESS(prop_value, QOS_SAFE_LUT, i);
+			SDE_DEBUG("safe usage:%d lut:0x%llx\n", i, cfg->perf.safe_lut[i]);
 		}
 	}
 
@@ -4725,26 +4653,11 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
 	int rc = 0;
 	uint32_t dma_list_size, vig_list_size, wb2_list_size;
 	uint32_t virt_vig_list_size, in_rot_list_size = 0;
-	uint32_t cursor_list_size = 0;
 	uint32_t index = 0;
 	uint32_t in_rot_restricted_list_size = 0;
 	const struct sde_format_extended *inline_fmt_tbl = NULL;
 	const struct sde_format_extended *inline_restricted_fmt_tbl = NULL;
 
-	/* cursor input formats */
-	if (test_bit(SDE_FEATURE_CURSOR, sde_cfg->features)) {
-		cursor_list_size = ARRAY_SIZE(cursor_formats);
-		sde_cfg->cursor_formats = kcalloc(cursor_list_size,
-			sizeof(struct sde_format_extended), GFP_KERNEL);
-		if (!sde_cfg->cursor_formats) {
-			rc = -ENOMEM;
-			goto out;
-		}
-		index = sde_copy_formats(sde_cfg->cursor_formats,
-			cursor_list_size, 0, cursor_formats,
-			ARRAY_SIZE(cursor_formats));
-	}
-
 	/* DMA pipe input formats */
 	dma_list_size = ARRAY_SIZE(plane_formats);
 	if (test_bit(SDE_FEATURE_FP16, sde_cfg->features))
@@ -4754,7 +4667,7 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
 		sizeof(struct sde_format_extended), GFP_KERNEL);
 	if (!sde_cfg->dma_formats) {
 		rc = -ENOMEM;
-		goto free_cursor;
+		goto out;
 	}
 
 	index = sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
@@ -4871,9 +4784,6 @@ free_vig:
 	kfree(sde_cfg->vig_formats);
 free_dma:
 	kfree(sde_cfg->dma_formats);
-free_cursor:
-	if (test_bit(SDE_FEATURE_CURSOR, sde_cfg->features))
-		kfree(sde_cfg->cursor_formats);
 out:
 	return rc;
 }
@@ -4883,42 +4793,40 @@ static void _sde_hw_setup_uidle(struct sde_uidle_cfg *uidle_cfg)
 	if (!uidle_cfg->uidle_rev)
 		return;
 
-	if ((IS_SDE_UIDLE_REV_102(uidle_cfg->uidle_rev)) ||
-			(IS_SDE_UIDLE_REV_101(uidle_cfg->uidle_rev)) ||
-			(IS_SDE_UIDLE_REV_100(uidle_cfg->uidle_rev))) {
-		uidle_cfg->fal10_exit_cnt = SDE_UIDLE_FAL10_EXIT_CNT;
-		uidle_cfg->fal10_exit_danger = SDE_UIDLE_FAL10_EXIT_DANGER;
-		uidle_cfg->fal10_danger = SDE_UIDLE_FAL10_DANGER;
-		uidle_cfg->fal10_target_idle_time = SDE_UIDLE_FAL10_TARGET_IDLE;
-		uidle_cfg->fal1_target_idle_time = SDE_UIDLE_FAL1_TARGET_IDLE;
-		uidle_cfg->max_dwnscale = SDE_UIDLE_MAX_DWNSCALE;
-		uidle_cfg->debugfs_ctrl = true;
-		uidle_cfg->fal1_max_threshold = SDE_UIDLE_FAL1_MAX_THRESHOLD;
-
-		if (IS_SDE_UIDLE_REV_100(uidle_cfg->uidle_rev)) {
-			uidle_cfg->fal10_threshold =
-				SDE_UIDLE_FAL10_THRESHOLD_60;
-			uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_60;
-		} else if (IS_SDE_UIDLE_REV_101(uidle_cfg->uidle_rev)) {
-			set_bit(SDE_UIDLE_QACTIVE_OVERRIDE,
-					&uidle_cfg->features);
-			uidle_cfg->fal10_threshold =
-				SDE_UIDLE_FAL10_THRESHOLD_90;
-			uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_90;
-		} else if (IS_SDE_UIDLE_REV_102(uidle_cfg->uidle_rev)) {
-			set_bit(SDE_UIDLE_QACTIVE_OVERRIDE,
-					&uidle_cfg->features);
-			uidle_cfg->fal10_threshold =
-				SDE_UIDLE_FAL10_THRESHOLD_90;
-			uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_90;
-			uidle_cfg->max_fal1_fps = SDE_UIDLE_MAX_FPS_240;
-			uidle_cfg->fal1_max_threshold =
-					SDE_UIDLE_REV102_FAL1_MAX_THRESHOLD;
-		}
-	} else {
-		pr_err("invalid uidle rev:0x%x, disabling uidle\n",
-			uidle_cfg->uidle_rev);
-		uidle_cfg->uidle_rev = 0;
+	uidle_cfg->fal10_exit_cnt = SDE_UIDLE_FAL10_EXIT_CNT;
+	uidle_cfg->fal10_exit_danger = SDE_UIDLE_FAL10_EXIT_DANGER;
+	uidle_cfg->fal10_danger = SDE_UIDLE_FAL10_DANGER;
+	uidle_cfg->fal10_target_idle_time = SDE_UIDLE_FAL10_TARGET_IDLE;
+	uidle_cfg->fal1_target_idle_time = SDE_UIDLE_FAL1_TARGET_IDLE;
+	uidle_cfg->max_dwnscale = SDE_UIDLE_MAX_DWNSCALE;
+	uidle_cfg->debugfs_ctrl = true;
+	uidle_cfg->fal1_max_threshold = SDE_UIDLE_FAL1_MAX_THRESHOLD;
+
+	if (IS_SDE_UIDLE_REV_100(uidle_cfg->uidle_rev)) {
+		uidle_cfg->fal10_threshold =
+			SDE_UIDLE_FAL10_THRESHOLD_60;
+		uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_60;
+	} else if (IS_SDE_UIDLE_REV_101(uidle_cfg->uidle_rev)) {
+		set_bit(SDE_UIDLE_QACTIVE_OVERRIDE,
+				&uidle_cfg->features);
+		uidle_cfg->fal10_threshold =
+			SDE_UIDLE_FAL10_THRESHOLD_90;
+		uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_90;
+	} else if (IS_SDE_UIDLE_REV_102(uidle_cfg->uidle_rev)) {
+		set_bit(SDE_UIDLE_QACTIVE_OVERRIDE,
+				&uidle_cfg->features);
+		uidle_cfg->fal10_threshold =
+			SDE_UIDLE_FAL10_THRESHOLD_90;
+		uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_90;
+		uidle_cfg->max_fal1_fps = SDE_UIDLE_MAX_FPS_240;
+		uidle_cfg->fal1_max_threshold =
+				SDE_UIDLE_FAL1_MAX_THRESHOLD_EXT_REV_102;
+	} else if (IS_SDE_UIDLE_REV_103(uidle_cfg->uidle_rev)) {
+		set_bit(SDE_UIDLE_QACTIVE_OVERRIDE, &uidle_cfg->features);
+		uidle_cfg->max_fps = SDE_UIDLE_MAX_FPS_240;
+		uidle_cfg->max_fal1_fps = SDE_UIDLE_MAX_FPS_240;
+		uidle_cfg->fal1_max_threshold = SDE_UIDLE_FAL1_MAX_THRESHOLD_EXT_REV_103;
+		uidle_cfg->fal10_threshold = SDE_UIDLE_FAL10_THRESHOLD_60;
 	}
 }
 
@@ -4959,7 +4867,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->vbif_qos_nlvl = 4;
 		sde_cfg->ts_prefill_rev = 1;
 		set_bit(SDE_FEATURE_DECIMATION, sde_cfg->features);
-		set_bit(SDE_FEATURE_CURSOR, sde_cfg->features);
 		clear_bit(SDE_FEATURE_COMBINED_ALPHA, sde_cfg->features);
 		clear_bit(SDE_FEATURE_DELAY_PRG_FETCH, sde_cfg->features);
 		clear_bit(SDE_FEATURE_SUI_MISR, sde_cfg->features);
@@ -5130,7 +5037,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
 		sde_cfg->mdss_hw_block_size = 0x158;
 		set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
-		sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		set_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map);
 	} else if (IS_HOLI_TARGET(hw_rev)) {
 		set_bit(SDE_FEATURE_QSYNC, sde_cfg->features);
 		sde_cfg->perf.min_prefill_lines = 24;
@@ -5160,7 +5067,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
 		sde_cfg->mdss_hw_block_size = 0x158;
 		set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
-		sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		set_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map);
 	} else if (IS_WAIPIO_TARGET(hw_rev) || IS_CAPE_TARGET(hw_rev)) {
 		sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
 		set_bit(SDE_FEATURE_DEDICATED_CWB, sde_cfg->features);
@@ -5182,7 +5089,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
 		set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
 		sde_cfg->mdss_hw_block_size = 0x158;
-		sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		set_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map);
 		set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features);
 		set_bit(SDE_FEATURE_FP16, sde_cfg->features);
 		set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features);
@@ -5220,6 +5127,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_CWB_CROP, sde_cfg->features);
 		set_bit(SDE_FEATURE_QSYNC, sde_cfg->features);
 		sde_cfg->perf.min_prefill_lines = 40;
+		sde_cfg->has_reduced_ob_max = true;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
@@ -5232,7 +5140,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_VBIF_DISABLE_SHAREABLE, sde_cfg->features);
 		set_bit(SDE_FEATURE_DITHER_LUMA_MODE, sde_cfg->features);
 		sde_cfg->mdss_hw_block_size = 0x158;
-		sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		set_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map);
 		set_bit(SDE_FEATURE_MULTIRECT_ERROR, sde_cfg->features);
 		set_bit(SDE_FEATURE_FP16, sde_cfg->features);
 		set_bit(SDE_MDP_PERIPH_TOP_0_REMOVED, &sde_cfg->mdp[0].features);
@@ -5265,20 +5173,27 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		set_bit(SDE_FEATURE_AVR_STEP, sde_cfg->features);
 		set_bit(SDE_FEATURE_VBIF_CLK_SPLIT, sde_cfg->features);
 		set_bit(SDE_FEATURE_CTL_DONE, sde_cfg->features);
-		sde_cfg->sc_cfg[SDE_SYS_CACHE_DISP].has_sys_cache = true;
+		set_bit(SDE_FEATURE_TRUSTED_VM, sde_cfg->features);
+		set_bit(SDE_SYS_CACHE_DISP, sde_cfg->sde_sys_cache_type_map);
+		set_bit(SDE_SYS_CACHE_DISP_1, sde_cfg->sde_sys_cache_type_map);
+		set_bit(SDE_SYS_CACHE_DISP_WB, sde_cfg->sde_sys_cache_type_map);
+		set_bit(SDE_FEATURE_SYS_CACHE_NSE, sde_cfg->features);
 		sde_cfg->allowed_dsc_reservation_switch = SDE_DP_DSC_RESERVATION_SWITCH;
 		sde_cfg->autorefresh_disable_seq = AUTOREFRESH_DISABLE_SEQ2;
 		sde_cfg->perf.min_prefill_lines = 40;
 		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->qos_target_time_ns = 11160;
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
 		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_2_0_1;
-		sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_2;
+		sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_3;
+		sde_cfg->sid_rev = SDE_SID_VERSION_2_0_0;
 		sde_cfg->mdss_hw_block_size = 0x158;
 		sde_cfg->demura_supported[SSPP_DMA1][0] = 0;
 		sde_cfg->demura_supported[SSPP_DMA1][1] = 1;
 		sde_cfg->demura_supported[SSPP_DMA3][0] = 0;
 		sde_cfg->demura_supported[SSPP_DMA3][1] = 1;
+		sde_cfg->has_line_insertion = true;
 	} else {
 		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -5325,6 +5240,22 @@ end:
 	return rc;
 }
 
+static void _sde_hw_fence_caps(struct sde_mdss_cfg *sde_cfg)
+{
+	struct sde_ctl_cfg *ctl;
+	int i;
+
+	if (!sde_cfg->hw_fence_rev)
+		return;
+
+	set_bit(SDE_FEATURE_HW_FENCE_IPCC, sde_cfg->features);
+
+	for (i = 0; i < sde_cfg->ctl_count; i++) {
+		ctl = sde_cfg->ctl + i;
+		set_bit(SDE_CTL_HW_FENCE, &ctl->features);
+	}
+}
+
 static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
 	uint32_t hw_rev)
 {
@@ -5370,6 +5301,9 @@ static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
 
 	sde_cfg->min_display_height = MIN_DISPLAY_HEIGHT;
 	sde_cfg->min_display_width = MIN_DISPLAY_WIDTH;
+	sde_cfg->max_cwb = min_t(u32, sde_cfg->wb_count, MAX_CWB_SESSIONS);
+
+	_sde_hw_fence_caps(sde_cfg);
 
 	rc = _sde_hw_dnsc_blur_filter_caps(sde_cfg);
 
@@ -5423,7 +5357,6 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 	kfree(sde_cfg->perf.creq_lut);
 
 	kfree(sde_cfg->dma_formats);
-	kfree(sde_cfg->cursor_formats);
 	kfree(sde_cfg->vig_formats);
 	kfree(sde_cfg->wb_formats);
 	kfree(sde_cfg->virt_vig_formats);
@@ -5466,6 +5399,11 @@ static int sde_hw_ver_parse_dt(struct drm_device *dev, struct device_node *np,
 	else
 		cfg->hw_rev = sde_kms_get_hw_version(dev);
 
+	if (prop_exists[SDE_HW_FENCE_VERSION])
+		cfg->hw_fence_rev = PROP_VALUE_ACCESS(prop_value, SDE_HW_FENCE_VERSION, 0);
+	else
+		cfg->hw_fence_rev = 0; /* disable hw-fences */
+
 end:
 	kfree(prop_value);
 	return rc;

+ 103 - 40
msm/sde/sde_hw_catalog.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
@@ -104,8 +104,13 @@
 #define IS_SDE_CP_VER_1_0(version) \
 	(version == SDE_COLOR_PROCESS_VER(0x1, 0x0))
 
+#define SDE_SID_VERSION_2_0_0       0x200
+#define IS_SDE_SID_REV_200(rev) \
+	((rev) == SDE_SID_VERSION_2_0_0)
+
 #define MAX_XIN_COUNT 16
 #define SSPP_SUBBLK_COUNT_MAX 2
+#define MAX_CWB_SESSIONS 1
 
 #define SDE_CTL_CFG_VERSION_1_0_0       0x100
 #define MAX_INTF_PER_CTL_V1                 2
@@ -148,6 +153,7 @@
 #define SDE_UIDLE_VERSION_1_0_0		0x100
 #define SDE_UIDLE_VERSION_1_0_1		0x101
 #define SDE_UIDLE_VERSION_1_0_2		0x102
+#define SDE_UIDLE_VERSION_1_0_3		0x103
 
 #define IS_SDE_UIDLE_REV_100(rev) \
 	((rev) == SDE_UIDLE_VERSION_1_0_0)
@@ -155,6 +161,8 @@
 	((rev) == SDE_UIDLE_VERSION_1_0_1)
 #define IS_SDE_UIDLE_REV_102(rev) \
 	((rev) == SDE_UIDLE_VERSION_1_0_2)
+#define IS_SDE_UIDLE_REV_103(rev) \
+	((rev) == SDE_UIDLE_VERSION_1_0_3)
 
 #define SDE_UIDLE_MAJOR(rev)		((rev) >> 8)
 
@@ -169,6 +177,7 @@ enum {
 	SDE_HW_UBWC_VER_20 = SDE_HW_UBWC_VER(0x200),
 	SDE_HW_UBWC_VER_30 = SDE_HW_UBWC_VER(0x300),
 	SDE_HW_UBWC_VER_40 = SDE_HW_UBWC_VER(0x400),
+	SDE_HW_UBWC_VER_43 = SDE_HW_UBWC_VER(0x431),
 };
 #define IS_UBWC_10_SUPPORTED(rev) \
 		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_10)
@@ -178,6 +187,8 @@ enum {
 		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_30)
 #define IS_UBWC_40_SUPPORTED(rev) \
 		IS_SDE_MAJOR_SAME((rev), SDE_HW_UBWC_VER_40)
+#define IS_UBWC_43_SUPPORTED(rev) \
+		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_43)
 
 /**
  * Supported system cache settings
@@ -188,14 +199,21 @@ enum {
 #define SYS_CACHE_OP_TYPE	BIT(3)
 #define SYS_CACHE_NO_ALLOC	BIT(4)
 
+/* default line padding ratio limitation */
+#define MAX_VPADDING_RATIO_M	93
+#define MAX_VPADDING_RATIO_N	45
+
 /**
  * sde_sys_cache_type: Types of system cache supported
- * SDE_SYS_CACHE_DISP: Static img system cache
- * SDE_SYS_CACHE_MAX:  Maximum number of sys cache users
- * SDE_SYS_CACHE_NONE: Sys cache not used
+ * SDE_SYS_CACHE_DISP: System cache for static display read/write path use case
+ * SDE_SYS_CACHE_DISP_1: System cache for static display write path use case
+ * SDE_SYS_CACHE_DISP_WB: System cache for IWE use case
+ * SDE_SYS_CACHE_MAX:  Maximum number of system cache users
+ * SDE_SYS_CACHE_NONE: System cache not used
  */
 enum sde_sys_cache_type {
 	SDE_SYS_CACHE_DISP,
+	SDE_SYS_CACHE_DISP_1,
 	SDE_SYS_CACHE_DISP_WB,
 	SDE_SYS_CACHE_MAX,
 	SDE_SYS_CACHE_NONE = SDE_SYS_CACHE_MAX
@@ -264,13 +282,11 @@ enum {
  * @SDE_SSPP_SRC             Src and fetch part of the pipes,
  * @SDE_SSPP_SCALER_QSEED2,  QSEED2 algorithm support
  * @SDE_SSPP_SCALER_QSEED3,  QSEED3 alogorithm support
- * @SDE_SSPP_SCALER_RGB,     RGB Scaler, supported by RGB pipes
  * @SDE_SSPP_CSC,            Support of Color space converion
  * @SDE_SSPP_CSC_10BIT,      Support of 10-bit Color space conversion
  * @SDE_SSPP_HSIC,           Global HSIC control
  * @SDE_SSPP_MEMCOLOR        Memory Color Support
  * @SDE_SSPP_PCC,            Color correction support
- * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
  * @SDE_SSPP_EXCL_RECT,      SSPP supports exclusion rect
  * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
  * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
@@ -295,19 +311,19 @@ enum {
  * @SDE_SSPP_FP16_CSC        FP16 CSC color processing block support
  * @SDE_SSPP_FP16_UNMULT     FP16 alpha unmult color processing block support
  * @SDE_SSPP_UBWC_STATS:     Support for ubwc stats
+ * @SDE_SSPP_SCALER_DE_LPF_BLEND:     Support for detail enhancer
+ * @SDE_SSPP_LINE_INSERTION  Line insertion support
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
 	SDE_SSPP_SRC = 0x1,
 	SDE_SSPP_SCALER_QSEED2,
 	SDE_SSPP_SCALER_QSEED3,
-	SDE_SSPP_SCALER_RGB,
 	SDE_SSPP_CSC,
 	SDE_SSPP_CSC_10BIT,
 	SDE_SSPP_HSIC,
 	SDE_SSPP_MEMCOLOR,
 	SDE_SSPP_PCC,
-	SDE_SSPP_CURSOR,
 	SDE_SSPP_EXCL_RECT,
 	SDE_SSPP_SMART_DMA_V1,
 	SDE_SSPP_SMART_DMA_V2,
@@ -332,6 +348,8 @@ enum {
 	SDE_SSPP_FP16_CSC,
 	SDE_SSPP_FP16_UNMULT,
 	SDE_SSPP_UBWC_STATS,
+	SDE_SSPP_SCALER_DE_LPF_BLEND,
+	SDE_SSPP_LINE_INSERTION,
 	SDE_SSPP_MAX
 };
 
@@ -344,6 +362,7 @@ enum {
  * @SDE_PERF_SSPP_CDP             Supports client driven prefetch
  * @SDE_PERF_SSPP_SYS_CACHE,      SSPP supports system cache
  * @SDE_PERF_SSPP_UIDLE,          sspp supports uidle
+ * @SDE_PERF_SSPP_UIDLE_FILL_LVL_SCALE,          sspp supports uidle fill level scaling
  * @SDE_PERF_SSPP_MAX             Maximum value
  */
 enum {
@@ -354,6 +373,7 @@ enum {
 	SDE_PERF_SSPP_CDP,
 	SDE_PERF_SSPP_SYS_CACHE,
 	SDE_PERF_SSPP_UIDLE,
+	SDE_PERF_SSPP_UIDLE_FILL_LVL_SCALE,
 	SDE_PERF_SSPP_MAX
 };
 
@@ -385,6 +405,18 @@ enum {
 	SDE_MIXER_MAX
 };
 
+/**
+ * Destination scalar features
+ * @SDE_DS_DE_LPF_BLEND       DE_LPF blend supports for destination scalar block
+ * @SDE_DS_MERGE_CTRL  	      mode operation support for destination scalar block
+ * @SDE_DS_DE_LPF_MAX         maximum value
+ */
+enum {
+	SDE_DS_DE_LPF_BLEND = 0x1,
+	SDE_DS_MERGE_CTRL,
+	SDE_DS_DE_LPF_MAX
+};
+
 /**
  * DSPP sub-blocks
  * @SDE_DSPP_IGC             DSPP Inverse gamma correction block
@@ -473,6 +505,7 @@ enum {
  * @SDE_DSC_HW_REV_1_1          dsc block supports dsc 1.1 only
  * @SDE_DSC_HW_REV_1_2          dsc block supports dsc 1.1 and 1.2
  * @SDE_DSC_NATIVE_422_EN,      Supports native422 and native420 encoding
+ * @SDE_DSC_REDUCED_OB_MAX,	DSC size is limited to 10k
  * @SDE_DSC_ENC,                DSC encoder sub block
  * @SDE_DSC_CTL,                DSC ctl sub block
  * @SDE_DSC_4HS,                Dedicated DSC 4HS config registers
@@ -483,6 +516,7 @@ enum {
 	SDE_DSC_HW_REV_1_1,
 	SDE_DSC_HW_REV_1_2,
 	SDE_DSC_NATIVE_422_EN,
+	SDE_DSC_REDUCED_OB_MAX,
 	SDE_DSC_ENC,
 	SDE_DSC_CTL,
 	SDE_DSC_4HS,
@@ -523,6 +557,7 @@ enum {
  *                              blocks
  * @SDE_CTL_UIDLE               CTL supports uidle
  * @SDE_CTL_UNIFIED_DSPP_FLUSH  CTL supports only one flush bit for DSPP
+ * @SDE_CTL_HW_FENCE            CTL supports hw fencing
  * @SDE_CTL_MAX
  */
 enum {
@@ -532,6 +567,7 @@ enum {
 	SDE_CTL_ACTIVE_CFG,
 	SDE_CTL_UIDLE,
 	SDE_CTL_UNIFIED_DSPP_FLUSH,
+	SDE_CTL_HW_FENCE,
 	SDE_CTL_MAX
 };
 
@@ -547,6 +583,7 @@ enum {
  * @SDE_INTF_PANEL_VSYNC_TS     INTF block has panel vsync timestamp logged
  * @SDE_INTF_MDP_VSYNC_TS       INTF block has mdp vsync timestamp logged
  * @SDE_INTF_AVR_STATUS         INTF block has AVR_STATUS field in AVR_CONTROL register
+ * @SDE_INTF_WD_JITTER          INTF block has WD timer jitter support
  * @SDE_INTF_MAX
  */
 enum {
@@ -559,6 +596,7 @@ enum {
 	SDE_INTF_PANEL_VSYNC_TS,
 	SDE_INTF_MDP_VSYNC_TS,
 	SDE_INTF_AVR_STATUS,
+	SDE_INTF_WD_JITTER,
 	SDE_INTF_MAX
 };
 
@@ -662,7 +700,6 @@ enum {
  * @SDE_FEATURE_BASE_LAYER     Base Layer supported
  * @SDE_FEATURE_TOUCH_WAKEUP   Early wakeup with touch supported
  * @SDE_FEATURE_SRC_SPLIT      Source split supported
- * @SDE_FEATURE_CURSOR         Cursor supported
  * @SDE_FEATURE_VIG_P010       P010 ViG pipe format supported
  * @SDE_FEATURE_FP16           FP16 pipe format supported
  * @SDE_FEATURE_HDR            High Dynamic Range supported
@@ -678,7 +715,6 @@ enum {
  * @SDE_FEATURE_INLINE_SKIP_THRESHOLD      Skip inline rotation threshold
  * @SDE_FEATURE_DITHER_LUMA_MODE           Dither LUMA mode supported
  * @SDE_FEATURE_RC_LM_FLUSH_OVERRIDE       RC LM flush override supported
- * @SDE_FEATURE_SYSCACHE       System cache supported
  * @SDE_FEATURE_SUI_MISR       SecureUI MISR supported
  * @SDE_FEATURE_SUI_BLENDSTAGE SecureUI Blendstage supported
  * @SDE_FEATURE_SUI_NS_ALLOWED SecureUI allowed to access non-secure context banks
@@ -686,6 +722,9 @@ enum {
  * @SDE_FEATURE_UBWC_STATS     UBWC statistics supported
  * @SDE_FEATURE_VBIF_CLK_SPLIT VBIF clock split supported
  * @SDE_FEATURE_CTL_DONE       Support for CTL DONE irq
+ * @SDE_FEATURE_SYS_CACHE_NSE  Support for no-self-evict feature
+ * @SDE_FEATURE_HW_FENCE_IPCC  HW fence supports ipcc signaling in dpu
+ * @SDE_FEATURE_EMULATED_ENV   Emulated environment supported
  * @SDE_FEATURE_MAX:             MAX features value
  */
 enum sde_mdss_features {
@@ -703,7 +742,6 @@ enum sde_mdss_features {
 	SDE_FEATURE_BASE_LAYER,
 	SDE_FEATURE_TOUCH_WAKEUP,
 	SDE_FEATURE_SRC_SPLIT,
-	SDE_FEATURE_CURSOR,
 	SDE_FEATURE_VIG_P010,
 	SDE_FEATURE_FP16,
 	SDE_FEATURE_HDR,
@@ -719,7 +757,6 @@ enum sde_mdss_features {
 	SDE_FEATURE_INLINE_SKIP_THRESHOLD,
 	SDE_FEATURE_DITHER_LUMA_MODE,
 	SDE_FEATURE_RC_LM_FLUSH_OVERRIDE,
-	SDE_FEATURE_SYSCACHE,
 	SDE_FEATURE_SUI_MISR,
 	SDE_FEATURE_SUI_BLENDSTAGE,
 	SDE_FEATURE_SUI_NS_ALLOWED,
@@ -727,6 +764,9 @@ enum sde_mdss_features {
 	SDE_FEATURE_UBWC_STATS,
 	SDE_FEATURE_VBIF_CLK_SPLIT,
 	SDE_FEATURE_CTL_DONE,
+	SDE_FEATURE_SYS_CACHE_NSE,
+	SDE_FEATURE_HW_FENCE_IPCC,
+	SDE_FEATURE_EMULATED_ENV,
 	SDE_FEATURE_MAX
 };
 
@@ -850,6 +890,7 @@ enum sde_qos_lut_usage {
 	SDE_QOS_LUT_USAGE_CWB_TILE,
 	SDE_QOS_LUT_USAGE_INLINE,
 	SDE_QOS_LUT_USAGE_INLINE_RESTRICTED_FMTS,
+	SDE_QOS_LUT_USAGE_OFFLINE_WB,
 	SDE_QOS_LUT_USAGE_MAX,
 };
 
@@ -863,6 +904,16 @@ enum sde_creq_lut_types {
 	SDE_CREQ_LUT_TYPE_MAX,
 };
 
+/**
+ * enum sde_danger_safe_lut_types - define danger/safe LUT types possible for all use cases
+ * This is second dimension to sde_qos_lut_usage enum.
+ */
+enum sde_danger_safe_lut_types {
+	SDE_DANGER_SAFE_LUT_TYPE_PORTRAIT,
+	SDE_DANGER_SAFE_LUT_TYPE_LANDSCAPE,
+	SDE_DANGER_SAFE_LUT_TYPE_MAX,
+};
+
 /**
  * struct sde_sspp_sub_blks : SSPP sub-blocks
  * @maxlinewidth: max source pipe line width support
@@ -988,12 +1039,14 @@ struct sde_lm_sub_blks {
  * @version: HW Algorithm version.
  * @idx: HW block instance id.
  * @mem_total_size: data memory size.
+ * @min_region_width: minimum region width in pixels.
  */
 struct sde_dspp_rc {
 	SDE_HW_SUBBLK_INFO;
 	u32 version;
 	u32 idx;
 	u32 mem_total_size;
+	u32 min_region_width;
 };
 
 struct sde_dspp_sub_blks {
@@ -1068,18 +1121,12 @@ enum sde_clk_ctrl_type {
 	SDE_CLK_CTRL_VIG2,
 	SDE_CLK_CTRL_VIG3,
 	SDE_CLK_CTRL_VIG4,
-	SDE_CLK_CTRL_RGB0,
-	SDE_CLK_CTRL_RGB1,
-	SDE_CLK_CTRL_RGB2,
-	SDE_CLK_CTRL_RGB3,
 	SDE_CLK_CTRL_DMA0,
 	SDE_CLK_CTRL_DMA1,
 	SDE_CLK_CTRL_DMA2,
 	SDE_CLK_CTRL_DMA3,
 	SDE_CLK_CTRL_DMA4,
 	SDE_CLK_CTRL_DMA5,
-	SDE_CLK_CTRL_CURSOR0,
-	SDE_CLK_CTRL_CURSOR1,
 	SDE_CLK_CTRL_WB0,
 	SDE_CLK_CTRL_WB1,
 	SDE_CLK_CTRL_WB2,
@@ -1089,8 +1136,8 @@ enum sde_clk_ctrl_type {
 };
 
 #define SDE_CLK_CTRL_VALID(x) (x > SDE_CLK_CTRL_NONE && x < SDE_CLK_CTRL_MAX)
-#define SDE_CLK_CTRL_SSPP_VALID(x) (x >= SDE_CLK_CTRL_VIG0 && x <= SDE_CLK_CTRL_CURSOR1)
-#define SDE_CLK_CTRL_WB_VALID(x) (x >= SDE_CLK_CTRL_WB0 && x <= SDE_CLK_CTRL_WB2)
+#define SDE_CLK_CTRL_SSPP_VALID(x) (x >= SDE_CLK_CTRL_VIG0 && x < SDE_CLK_CTRL_WB0)
+#define SDE_CLK_CTRL_WB_VALID(x) (x >= SDE_CLK_CTRL_WB0 && x < SDE_CLK_CTRL_LUTDMA)
 #define SDE_CLK_CTRL_LUTDMA_VALID(x) (x == SDE_CLK_CTRL_LUTDMA)
 #define SDE_CLK_CTRL_IPCC_MSI_VALID(x) (x == SDE_CLK_CTRL_IPCC_MSI)
 
@@ -1104,18 +1151,12 @@ static const char *sde_clk_ctrl_type_s[SDE_CLK_CTRL_MAX] = {
 	[SDE_CLK_CTRL_VIG2] = "VIG2",
 	[SDE_CLK_CTRL_VIG3] = "VIG3",
 	[SDE_CLK_CTRL_VIG4] = "VIG4",
-	[SDE_CLK_CTRL_RGB0] = "RGB0",
-	[SDE_CLK_CTRL_RGB1] = "RGB1",
-	[SDE_CLK_CTRL_RGB2] = "RGB2",
-	[SDE_CLK_CTRL_RGB3] = "RGB3",
 	[SDE_CLK_CTRL_DMA0] = "DMA0",
 	[SDE_CLK_CTRL_DMA1] = "DMA1",
 	[SDE_CLK_CTRL_DMA2] = "DMA2",
 	[SDE_CLK_CTRL_DMA3] = "DMA3",
 	[SDE_CLK_CTRL_DMA4] = "DMA4",
 	[SDE_CLK_CTRL_DMA5] = "DMA5",
-	[SDE_CLK_CTRL_CURSOR0] = "CURSOR0",
-	[SDE_CLK_CTRL_CURSOR1] = "CURSOR1",
 	[SDE_CLK_CTRL_WB0] = "WB0",
 	[SDE_CLK_CTRL_WB1] = "WB1",
 	[SDE_CLK_CTRL_WB2] = "WB2",
@@ -1312,11 +1353,13 @@ struct sde_ds_cfg {
  * @features           bit mask identifying sub-blocks/features
  * @sblk               sub-blocks information
  * @merge_3d_id        merge_3d block id
+ * @dcwb:              ID of DCWB, DCWB_MAX if invalid
  */
 struct sde_pingpong_cfg  {
 	SDE_HW_BLK_INFO;
 	const struct sde_pingpong_sub_blks *sblk;
 	int merge_3d_id;
+	u32 dcwb_id;
 };
 
 /**
@@ -1484,11 +1527,11 @@ struct sde_vbif_dynamic_ot_tbl {
 
 /**
  * struct sde_vbif_qos_tbl - QoS priority table
- * @npriority_lvl      num of priority level
+ * @count              count of entries - rp_remap + lvl_remap entries
  * @priority_lvl       pointer to array of priority level in ascending order
  */
 struct sde_vbif_qos_tbl {
-	u32 npriority_lvl;
+	u32 count;
 	u32 *priority_lvl;
 };
 
@@ -1498,6 +1541,8 @@ struct sde_vbif_qos_tbl {
  * @VBIF_NRT_CLIENT: non-realtime clients like writeback
  * @VBIF_CWB_CLIENT: concurrent writeback client
  * @VBIF_LUTDMA_CLIENT: LUTDMA client
+ * @VBIF_CNOC_CLIENT: HW fence client
+ * @VBIF_OFFLINE_WB_CLIENT: Offline WB client used in 2-pass composition
  * @VBIF_MAX_CLIENT: max number of clients
  */
 enum sde_vbif_client_type {
@@ -1505,6 +1550,8 @@ enum sde_vbif_client_type {
 	VBIF_NRT_CLIENT,
 	VBIF_CWB_CLIENT,
 	VBIF_LUTDMA_CLIENT,
+	VBIF_CNOC_CLIENT,
+	VBIF_OFFLINE_WB_CLIENT,
 	VBIF_MAX_CLIENT
 };
 
@@ -1564,6 +1611,7 @@ struct sde_reg_dma_blk_info {
  * @version            version of lutdma hw blocks
  * @trigger_sel_off    offset to trigger select registers of lutdma
  * @broadcast_disabled flag indicating if broadcast usage should be avoided
+ * @split_vbif_supported indicates if VBIF clock split is supported
  * @xin_id             VBIF xin client-id for LUTDMA
  * @vbif_idx           VBIF id (RT/NRT)
  * @base_off           Base offset of LUTDMA from the MDSS root
@@ -1574,6 +1622,7 @@ struct sde_reg_dma_cfg {
 	u32 version;
 	u32 trigger_sel_off;
 	u32 broadcast_disabled;
+	u32 split_vbif_supported;
 	u32 xin_id;
 	u32 vbif_idx;
 	u32 base_off;
@@ -1603,12 +1652,12 @@ struct sde_perf_cdp_cfg {
 
 /**
  * struct sde_sc_cfg - define system cache configuration
- * @has_sys_cache: true if system cache is enabled
+ * @llcc_uuid: llcc use case id for the system cache
  * @llcc_scid: scid for the system cache
  * @llcc_slice_size: slice size of the system cache
  */
 struct sde_sc_cfg {
-	bool has_sys_cache;
+	int llcc_uid;
 	int llcc_scid;
 	size_t llcc_slice_size;
 };
@@ -1708,9 +1757,12 @@ struct sde_perf_cfg {
  * @qseed_hw_rev        qseed HW block version
  * @smart_dma_rev       smartDMA block version
  * @ctl_rev             control path block version
+ * @sid_rev             SID version
+ * @has_reduced_ob_max indicate if DSC size is limited to 10k
  * @ts_prefill_rev      prefill traffic shaper feature revision
  * @true_inline_rot_rev inline rotator feature revision
  * @dnsc_blur_rev       downscale blur HW block version
+ * @hw_fence_rev        hw fence feature revision
  * @mdss_count          number of valid MDSS HW blocks
  * @mdss                array of pointers to MDSS HW blocks
  * @mdss_hw_block_size  max offset of MDSS_HW block (0 offset), used for debug
@@ -1779,7 +1831,9 @@ struct sde_perf_cfg {
  * @max_dsc_width       max dsc line width
  * @max_mixer_width     max layer mixer line width
  * @max_mixer_blendstages       max layer mixer blend stages (z orders)
+ * @max_cwb             max number of cwb supported
  * @vbif_qos_nlvl       number of vbif QoS priority levels
+ * @qos_target_time_ns  normalized qos target time for line-based qos
  * @macrotile_mode      UBWC parameter for macro tile channel distribution
  * @pipe_order_type     indicates if it is required to specify pipe order
  * @csc_type            csc or csc_10bit support
@@ -1789,9 +1843,9 @@ struct sde_perf_cfg {
  * @perf                performance control settings
  * @uidle_cfg           settings for uidle feature
  * @irq_offset_list     list of sde_intr_irq_offsets to initialize irq table
+ * @has_line_insertion  line insertion support status
  * @features            bitmap of supported SDE_FEATUREs
  * @dma_formats         supported formats for dma pipe
- * @cursor_formats      supported formats for cursor pipe
  * @vig_formats         supported formats for vig pipe
  * @wb_formats          supported formats for wb
  * @virt_vig_formats    supported formats for virtual vig pipe
@@ -1799,6 +1853,7 @@ struct sde_perf_cfg {
  * @inline_rot_restricted_formats       restricted formats for inline rotation
  * @dnsc_blur_filters        supported filters for downscale blur
  * @dnsc_blur_filter_count   supported filter count for downscale blur
+ * @ipcc_protocol_id    ipcc protocol id for the hw
  */
 struct sde_mdss_cfg {
 	/* Block Revisions */
@@ -1809,9 +1864,12 @@ struct sde_mdss_cfg {
 	u32 qseed_hw_rev;
 	u32 smart_dma_rev;
 	u32 ctl_rev;
+	u32 sid_rev;
+	bool has_reduced_ob_max;
 	u32 ts_prefill_rev;
 	u32 true_inline_rot_rev;
 	u32 dnsc_blur_rev;
+	u32 hw_fence_rev;
 
 	/* HW Blocks */
 	u32 mdss_count;
@@ -1886,23 +1944,26 @@ struct sde_mdss_cfg {
 	u32 max_dsc_width;
 	u32 max_mixer_width;
 	u32 max_mixer_blendstages;
+	u32 max_cwb;
 
 	/* Configs */
 	u32 vbif_qos_nlvl;
+	u32 qos_target_time_ns;
 	u32 macrotile_mode;
 	u32 pipe_order_type;
 	u32 csc_type;
 	u32 allowed_dsc_reservation_switch;
 	enum autorefresh_disable_sequence autorefresh_disable_seq;
 	struct sde_sc_cfg sc_cfg[SDE_SYS_CACHE_MAX];
+	DECLARE_BITMAP(sde_sys_cache_type_map, SDE_SYS_CACHE_MAX);
 	struct sde_perf_cfg perf;
 	struct sde_uidle_cfg uidle_cfg;
 	struct list_head irq_offset_list;
 	DECLARE_BITMAP(features, SDE_FEATURE_MAX);
+	bool has_line_insertion;
 
 	/* Supported Pixel Format Lists */
 	struct sde_format_extended *dma_formats;
-	struct sde_format_extended *cursor_formats;
 	struct sde_format_extended *vig_formats;
 	struct sde_format_extended *wb_formats;
 	struct sde_format_extended *virt_vig_formats;
@@ -1910,6 +1971,8 @@ struct sde_mdss_cfg {
 	struct sde_format_extended *inline_rot_restricted_formats;
 	struct sde_dnsc_blur_filter_info *dnsc_blur_filters;
 	u32 dnsc_blur_filter_count;
+
+	u32 ipcc_protocol_id;
 };
 
 struct sde_mdss_hw_cfg_handler {
@@ -1924,9 +1987,7 @@ struct sde_mdss_hw_cfg_handler {
 #define BLK_MDP(s) ((s)->mdp)
 #define BLK_CTL(s) ((s)->ctl)
 #define BLK_VIG(s) ((s)->vig)
-#define BLK_RGB(s) ((s)->rgb)
 #define BLK_DMA(s) ((s)->dma)
-#define BLK_CURSOR(s) ((s)->cursor)
 #define BLK_MIXER(s) ((s)->mixer)
 #define BLK_DSPP(s) ((s)->dspp)
 #define BLK_DS(s) ((s)->ds)
@@ -1939,13 +2000,15 @@ struct sde_mdss_hw_cfg_handler {
 #define BLK_RC(s) ((s)->rc)
 
 /**
- * sde_hw_set_preference: populate the individual hw lm preferences,
- *                        overwrite if exists
- * @sde_cfg:              pointer to sspp cfg
- * @num_lm:               num lms to set preference
- * @disp_type:            is the given display primary/secondary
+ * sde_hw_mixer_set_preference: populate the individual hw lm preferences,
+ *                              overwrite if exists
+ * @sde_cfg:                    pointer to sspp cfg
+ * @num_lm:                     num lms to set preference
+ * @disp_type:                  is the given display primary/secondary
+ *
+ * Return:                      layer mixer mask allocated for the disp_type
  */
-void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
+u32 sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
 		uint32_t disp_type);
 
 /**

+ 1 - 19
msm/sde/sde_hw_catalog_format.h

@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  * Copyright (c) 2015-2019, 2021 The Linux Foundation. All rights reserved.
  */
 
@@ -72,8 +73,6 @@ static const struct sde_format_extended plane_formats_vig[] = {
 	{DRM_FORMAT_NV12, 0},
 	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
 	{DRM_FORMAT_NV21, 0},
-	{DRM_FORMAT_NV16, 0},
-	{DRM_FORMAT_NV61, 0},
 	{DRM_FORMAT_VYUY, 0},
 	{DRM_FORMAT_UYVY, 0},
 	{DRM_FORMAT_YUYV, 0},
@@ -88,23 +87,6 @@ static const struct sde_format_extended plane_formats_vig[] = {
 	{0, 0},
 };
 
-static const struct sde_format_extended cursor_formats[] = {
-	{DRM_FORMAT_ARGB8888, 0},
-	{DRM_FORMAT_ABGR8888, 0},
-	{DRM_FORMAT_RGBA8888, 0},
-	{DRM_FORMAT_BGRA8888, 0},
-	{DRM_FORMAT_XRGB8888, 0},
-	{DRM_FORMAT_ARGB1555, 0},
-	{DRM_FORMAT_ABGR1555, 0},
-	{DRM_FORMAT_RGBA5551, 0},
-	{DRM_FORMAT_BGRA5551, 0},
-	{DRM_FORMAT_ARGB4444, 0},
-	{DRM_FORMAT_ABGR4444, 0},
-	{DRM_FORMAT_RGBA4444, 0},
-	{DRM_FORMAT_BGRA4444, 0},
-	{0, 0},
-};
-
 static const struct sde_format_extended wb2_formats[] = {
 	{DRM_FORMAT_RGB565, 0},
 	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},

Some files were not shown because too many files changed in this diff