
Merge remote-tracking branch 'quic/display-kernel.lnx.1.0' into display-kernel.lnx.5.4

* quic/display-kernel.lnx.1.0:
  disp: msm: sde: update shmbridge header file path
  disp: msm: sde: remove all preclose logic
  disp: msm: make kms a DRM client for lastclose logic
  disp: msm: delay msm_drv probe
  disp: msm: sde: use dma_map_single to flush the buffer
  disp: msm: remove support for Cx iPeak mitigation
  disp: msm: rotator: migrated the new BUS driver for rotator on lahaina
  disp: msm: dsi: remove dsi bus scaling setting
  disp: msm: sde: migrated new sde icb bus scaling driver for lahaina
  display: add uapi and shared headers to techpack folder
  disp: msm: sde: remove sde wrapper for clock set flags
  disp: msm: sde: use device tree node to enable INTF TE capability
  drm/msm/sde: add FETCH_ACTIVE logic and set default group ID
  disp: msm: attach address space to msm_gem_object
  disp: msm: sde: refactor sde_hw_interrupts to use offsets from catalog
  disp: msm: sde: get INTF TEAR IRQ offsets from device tree
  disp: msm: sde: rename MDSS_INTR_* enums to SDE_INTR_*
  disp: msm: sde: add Lahaina version checks
  disp: msm: sde: move all hw version checks in to the catalog
  Disp: Snapshot change for lahaina display driver
  disp: add rev checks for bengal target

Change-Id: I0e09d1bcfca50e86deaf4b00ba8c228c003495bd
Alisha Thapaliya 5 years ago
parent
commit
e5d0b99899
63 changed files with 3109 additions and 1748 deletions
  1. include/Kbuild (+2, -0)
  2. include/linux/Kbuild (+5, -0)
  3. include/linux/sde_io_util.h (+107, -0)
  4. include/linux/sde_rsc.h (+359, -0)
  5. include/uapi/Kbuild (+4, -0)
  6. include/uapi/drm/Kbuild (+5, -0)
  7. include/uapi/drm/msm_drm_pp.h (+573, -0)
  8. include/uapi/drm/sde_drm.h (+649, -0)
  9. include/uapi/media/Kbuild (+3, -0)
  10. include/uapi/media/msm_sde_rotator.h (+122, -0)
  11. msm/dp/dp_drm.c (+2, -2)
  12. msm/dp/dp_drm.h (+5, -7)
  13. msm/dp/dp_mst_drm.c (+2, -2)
  14. msm/dsi/dsi_clk.h (+0, -2)
  15. msm/dsi/dsi_clk_manager.c (+0, -21)
  16. msm/dsi/dsi_ctrl.c (+0, -47)
  17. msm/dsi/dsi_ctrl.h (+0, -12)
  18. msm/dsi/dsi_display.c (+8, -7)
  19. msm/dsi/dsi_drm.c (+2, -2)
  20. msm/dsi/dsi_drm.h (+0, -1)
  21. msm/dsi/dsi_phy.c (+0, -1)
  22. msm/msm_atomic.c (+10, -3)
  23. msm/msm_drv.c (+43, -137)
  24. msm/msm_drv.h (+15, -4)
  25. msm/msm_fb.c (+20, -15)
  26. msm/msm_gem.c (+153, -32)
  27. msm/msm_gem.h (+7, -3)
  28. msm/msm_gem_prime.c (+1, -1)
  29. msm/msm_gem_vma.c (+4, -5)
  30. msm/msm_kms.h (+4, -3)
  31. msm/msm_smmu.c (+0, -1)
  32. msm/sde/sde_connector.c (+8, -3)
  33. msm/sde/sde_connector.h (+1, -1)
  34. msm/sde/sde_crtc.c (+4, -18)
  35. msm/sde/sde_encoder.c (+2, -46)
  36. msm/sde/sde_encoder_phys_cmd.c (+11, -4)
  37. msm/sde/sde_encoder_phys_wb.c (+11, -10)
  38. msm/sde/sde_formats.c (+6, -3)
  39. msm/sde/sde_hw_catalog.c (+178, -26)
  40. msm/sde/sde_hw_catalog.h (+75, -48)
  41. msm/sde/sde_hw_ctl.c (+19, -2)
  42. msm/sde/sde_hw_interrupts.c (+224, -448)
  43. msm/sde/sde_hw_interrupts.h (+19, -0)
  44. msm/sde/sde_hw_lm.c (+6, -11)
  45. msm/sde/sde_hw_sspp.c (+4, -7)
  46. msm/sde/sde_hw_vbif.c (+1, -3)
  47. msm/sde/sde_kms.c (+45, -163)
  48. msm/sde/sde_plane.c (+3, -6)
  49. msm/sde/sde_rm.c (+1, -1)
  50. msm/sde/sde_wb.c (+1, -0)
  51. msm/sde_power_handle.c (+101, -229)
  52. msm/sde_power_handle.h (+35, -32)
  53. msm/sde_rsc.c (+32, -5)
  54. pll/pll_util.c (+0, -1)
  55. rotator/sde_rotator_base.c (+42, -64)
  56. rotator/sde_rotator_base.h (+17, -4)
  57. rotator/sde_rotator_core.c (+115, -187)
  58. rotator/sde_rotator_core.h (+27, -9)
  59. rotator/sde_rotator_dev.c (+0, -105)
  60. rotator/sde_rotator_dev.h (+0, -1)
  61. rotator/sde_rotator_r3.c (+15, -0)
  62. rotator/sde_rotator_smmu.c (+1, -1)
  63. rotator/sde_rotator_util.c (+0, -2)

+ 2 - 0
include/Kbuild

@@ -0,0 +1,2 @@
+# Top-level Makefile calls into asm-$(ARCH)
+# List only non-arch directories below

+ 5 - 0
include/linux/Kbuild

@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+
+header-y += sde_io_util.h
+header-y += sde_rsc.h
+

+ 107 - 0
include/linux/sde_io_util.h

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012, 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+	int pre_on_sleep;
+	int post_on_sleep;
+	int pre_off_sleep;
+	int post_off_sleep;
+};
+
+struct dss_gpio {
+	unsigned int gpio;
+	unsigned int value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+	unsigned long max_rate;
+};
+
+struct dss_module_power {
+	unsigned int num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned int num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned int num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_single_clk_set_rate(struct dss_clk *clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
+#endif /* __SDE_IO_UTIL_H__ */
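
Note: the snippet below is an illustrative consumer of the helpers declared above and is not part of this patch. It sketches one plausible power-on sequence under stated assumptions: the function name example_power_on, the dev/mp arguments (with mp already filled from device tree), and the exact call ordering are placeholders for illustration only.

#include <linux/sde_io_util.h>

static int example_power_on(struct device *dev, struct dss_module_power *mp)
{
	int rc;

	/* configure and switch on the regulators first */
	rc = msm_dss_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
	if (rc)
		return rc;
	rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 1);
	if (rc)
		return rc;

	/* then acquire the clocks, program their rates and enable them */
	rc = msm_dss_get_clk(dev, mp->clk_config, mp->num_clk);
	if (rc)
		goto vreg_off;
	rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
	if (rc)
		goto put_clk;
	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, 1);
	if (rc)
		goto put_clk;

	return 0;

put_clk:
	msm_dss_put_clk(mp->clk_config, mp->num_clk);
vreg_off:
	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
	return rc;
}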

+ 359 - 0
include/linux/sde_rsc.h

@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _SDE_RSC_H_
+#define _SDE_RSC_H_
+
+#include <linux/kernel.h>
+
+/* primary display rsc index */
+#define SDE_RSC_INDEX		0
+
+#define MAX_RSC_CLIENT_NAME_LEN 128
+
+/* DRM Object IDs are numbered excluding 0, use 0 to indicate invalid CRTC */
+#define SDE_RSC_INVALID_CRTC_ID 0
+
+/**
+ * event will be triggered before sde core power collapse,
+ * mdss gdsc is still on
+ */
+#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
+/**
+ * event will be triggered after sde core collapse complete,
+ * mdss gdsc is off now
+ */
+#define SDE_RSC_EVENT_POST_CORE_PC 0x2
+/**
+ * event will be triggered before restoring the sde core from power collapse,
+ * mdss gdsc is still off
+ */
+#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
+/**
+ * event will be triggered after restoring the sde core from power collapse,
+ * mdss gdsc is on now
+ */
+#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
+/**
+ * event attached with solver state enabled
+ * all clients in clk_state or cmd_state
+ */
+#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
+/**
+ * event attached with solver state disabled
+ * one of the client requested for vid state
+ */
+#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
+
+/**
+ * sde_rsc_client_type: sde rsc client type information
+ * SDE_RSC_PRIMARY_DISP_CLIENT:	A primary display client which can request
+ *				vid or cmd state switch.
+ * SDE_RSC_EXTERNAL_DISP_CLIENT:	An external display client which can
+ *                              request only a clk state switch.
+ * SDE_RSC_CLK_CLIENT:		A clk client requests only that the rsc clocks
+ *				stay enabled and that mode_2 is exited.
+ */
+enum sde_rsc_client_type {
+	SDE_RSC_PRIMARY_DISP_CLIENT,
+	SDE_RSC_EXTERNAL_DISP_CLIENT,
+	SDE_RSC_CLK_CLIENT,
+	SDE_RSC_INVALID_CLIENT,
+};
+
+/**
+ * sde_rsc_state: sde rsc state information
+ * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
+ *                    pixel or cmd transfer expected. An idle vote from
+ *                    all clients leads to the power collapse state.
+ * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
+ *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
+ *                    sde power handle, etc.
+ * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
+ *                    enable the solver mode.
+ * SDE_RSC_VID_STATE:  A client requests for vid state when it wants to avoid
+ *                    enabling the solver, because the client is fetching
+ *                    data continuously.
+ */
+enum sde_rsc_state {
+	SDE_RSC_IDLE_STATE,
+	SDE_RSC_CLK_STATE,
+	SDE_RSC_CMD_STATE,
+	SDE_RSC_VID_STATE,
+};
+
+/**
+ * struct sde_rsc_client: stores the rsc client for sde driver
+ * @name:	name of the client
+ * @current_state:   current client state
+ * @crtc_id:		crtc_id associated with this rsc client.
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @id:		Index of client. It will be assigned during client_create call
+ * @client_type: check sde_rsc_client_type information
+ * @list:	list to attach client master list
+ */
+struct sde_rsc_client {
+	char name[MAX_RSC_CLIENT_NAME_LEN];
+	short current_state;
+	int crtc_id;
+	u32 rsc_index;
+	u32 id;
+	enum sde_rsc_client_type client_type;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_event: local event registration entry structure
+ * @cb_func:	Pointer to desired callback function
+ * @usr:	User pointer to pass to callback on event trigger
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @event_type:	refer comments in event_register
+ * @list:	list to attach event master list
+ */
+struct sde_rsc_event {
+	void (*cb_func)(uint32_t event_type, void *usr);
+	void *usr;
+	u32 rsc_index;
+	uint32_t event_type;
+	struct list_head list;
+};
+
+/**
+ * struct sde_rsc_cmd_config: provides panel configuration to rsc
+ * when client is command mode. It is not required to set it during
+ * video mode.
+ *
+ * @fps:	panel te interval
+ * @vtotal:	current vertical total (height + vbp + vfp)
+ * @jitter_numer: panel jitter numerator value. This config makes the
+ *                rsc/solver run early, before the te. Default is 0.8% jitter.
+ * @jitter_denom: panel jitter denominator.
+ * @prefill_lines:	max prefill lines based on panel
+ */
+struct sde_rsc_cmd_config {
+	u32 fps;
+	u32 vtotal;
+	u32 jitter_numer;
+	u32 jitter_denom;
+	u32 prefill_lines;
+};
+
+#ifdef CONFIG_DRM_SDE_RSC
+/**
+ * sde_rsc_client_create() - create the client for sde rsc.
+ * Different displays like DSI, HDMI, DP, WB, etc should call this
+ * api to register their vote for rpmh. They still need to vote for
+ * power handle to get the clocks.
+ *
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @name:	 Caller needs to provide some valid string to identify
+ *               the client. "primary", "dp", "hdmi" are suggested name.
+ * @client_type: check client_type enum for information
+ * @vsync_source: This parameter is only valid for primary display. It provides
+ *               vsync source information
+ *
+ * Return: client node pointer.
+ */
+struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index, char *name,
+	enum sde_rsc_client_type client_type, u32 vsync_source);
+
+/**
+ * sde_rsc_client_destroy() - Destroy the sde rsc client.
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: none
+ */
+void sde_rsc_client_destroy(struct sde_rsc_client *client);
+
+/**
+ * sde_rsc_client_state_update() - rsc client state update
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
+ * set this property during panel time. A switching client can set the
+ * property to change the state.
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @state:	 Client state - video/cmd
+ * @config:	 fps, vtotal, porches, etc configuration for command mode
+ *               panel
+ * @crtc_id:	 current client's crtc id
+ * @wait_vblank_crtc_id:	Output parameter. If set to non-zero, rsc hw
+ *				state update requires a wait for one vblank on
+ *				the primary crtc. In that case, this output
+ *				param will be set to the crtc on which to wait.
+ *				If SDE_RSC_INVALID_CRTC_ID, no wait necessary
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id,
+	int *wait_vblank_crtc_id);
+
+/**
+ * sde_rsc_client_get_vsync_refcount() - returns the status of the vsync
+ * refcount, to signal if the client needs to reset the refcounting logic
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: true if the state update has completed.
+ */
+int sde_rsc_client_get_vsync_refcount(
+		struct sde_rsc_client *caller_client);
+
+/**
+ * sde_rsc_client_reset_vsync_refcount() - reduces the refcounting
+ * logic that waits for the vsync.
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: true if the state update has completed.
+ */
+int sde_rsc_client_reset_vsync_refcount(
+		struct sde_rsc_client *caller_client);
+
+/**
+ * sde_rsc_client_is_state_update_complete() - check if state update is complete
+ * RSC state transition is not complete until HW receives VBLANK signal. This
+ * function checks RSC HW to determine whether that signal has been received.
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ *
+ * Return: true if the state update has completed.
+ */
+bool sde_rsc_client_is_state_update_complete(
+		struct sde_rsc_client *caller_client);
+
+/**
+ * sde_rsc_client_vote() - stores ab/ib vote for rsc client
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @bus_id:	 data bus identifier
+ * @ab:		 aggregated bandwidth vote from client.
+ * @ib:		 instant bandwidth vote from client.
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u32 bus_id, u64 ab_vote, u64 ib_vote);
+
+/**
+ * sde_rsc_register_event - register a callback function for an event
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @event_type:  event type to register; client sets 0x3 if it wants
+ *               to register for CORE_PC and CORE_RESTORE - both events.
+ * @cb_func:     Pointer to desired callback function
+ * @usr:         User pointer to pass to callback on event trigger
+ * Returns: sde_rsc_event pointer on success
+ */
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
+
+/**
+ * sde_rsc_unregister_event - unregister callback for an event
+ * @sde_rsc_event: event returned by sde_rsc_register_event
+ */
+void sde_rsc_unregister_event(struct sde_rsc_event *event);
+
+/**
+ * is_sde_rsc_available - check if display rsc available.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * Returns: true if rsc is available; false in all other cases
+ */
+bool is_sde_rsc_available(int rsc_index);
+
+/**
+ * get_sde_rsc_current_state - gets the current state of sde rsc.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * Returns: current state if rsc available; SDE_RSC_IDLE_STATE for
+ *          all other cases
+ */
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index);
+
+/**
+ * sde_rsc_client_trigger_vote() - triggers ab/ib vote for rsc client
+ *
+ * @client:	 Client pointer provided by sde_rsc_client_create().
+ * @delta_vote:  if bw vote is increased or decreased
+ *
+ * Return: error code.
+ */
+int sde_rsc_client_trigger_vote(struct sde_rsc_client *caller_client,
+	bool delta_vote);
+
+#else
+
+static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
+	char *name, enum sde_rsc_client_type client_type, u32 vsync_source)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_client_destroy(struct sde_rsc_client *client)
+{
+}
+
+static inline int sde_rsc_client_state_update(struct sde_rsc_client *client,
+	enum sde_rsc_state state,
+	struct sde_rsc_cmd_config *config, int crtc_id,
+	int *wait_vblank_crtc_id)
+{
+	return 0;
+}
+
+static inline int sde_rsc_client_get_vsync_refcount(
+		struct sde_rsc_client *caller_client)
+{
+	return 0;
+}
+
+static inline int sde_rsc_client_reset_vsync_refcount(
+		struct sde_rsc_client *caller_client)
+{
+	return 0;
+}
+
+static inline bool sde_rsc_client_is_state_update_complete(
+		struct sde_rsc_client *caller_client)
+{
+	return false;
+}
+
+static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client,
+	u32 bus_id, u64 ab_vote, u64 ib_vote)
+{
+	return 0;
+}
+
+static inline struct sde_rsc_event *sde_rsc_register_event(int rsc_index,
+		uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	return NULL;
+}
+
+static inline void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+}
+
+static inline bool is_sde_rsc_available(int rsc_index)
+{
+	return false;
+}
+
+static inline enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	return SDE_RSC_IDLE_STATE;
+}
+
+static inline int sde_rsc_client_trigger_vote(
+	struct sde_rsc_client *caller_client, bool delta_vote)
+{
+	return 0;
+}
+#endif /* CONFIG_DRM_SDE_RSC */
+
+#endif /* _SDE_RSC_H_ */
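
Note: illustrative only, not part of this patch. A rough sketch of how a primary display client might vote for the solver (command) state through the API above; the function name example_rsc_vote and the numeric values in cmd_config are placeholders, and real panel timings would come from the panel configuration.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/sde_rsc.h>

static int example_rsc_vote(int crtc_id)
{
	struct sde_rsc_cmd_config cfg = {
		.fps = 60,		/* placeholder panel timing */
		.vtotal = 2500,		/* height + vbp + vfp */
		.jitter_numer = 8,	/* 0.8% jitter => 8/10 */
		.jitter_denom = 10,
		.prefill_lines = 25,
	};
	struct sde_rsc_client *rsc;
	int wait_crtc_id = SDE_RSC_INVALID_CRTC_ID;
	int rc;

	rsc = sde_rsc_client_create(SDE_RSC_INDEX, "primary",
			SDE_RSC_PRIMARY_DISP_CLIENT, 0);
	if (IS_ERR_OR_NULL(rsc))
		return -ENODEV;	/* stubbed out when CONFIG_DRM_SDE_RSC=n */

	/* request the solver/command state for this crtc */
	rc = sde_rsc_client_state_update(rsc, SDE_RSC_CMD_STATE, &cfg,
			crtc_id, &wait_crtc_id);
	if (rc)
		sde_rsc_client_destroy(rsc);

	return rc;
}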

+ 4 - 0
include/uapi/Kbuild

@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+
+header-y += media/
+header-y += drm/

+ 5 - 0
include/uapi/drm/Kbuild

@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+
+header-y += msm_drm_pp.h
+header-y += sde_drm.h
+

+ 573 - 0
include/uapi/drm/msm_drm_pp.h

@@ -0,0 +1,573 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _MSM_DRM_PP_H_
+#define _MSM_DRM_PP_H_
+
+#include <linux/types.h>
+/**
+ * struct drm_msm_pcc_coeff - PCC coefficient structure for each color
+ *                            component.
+ * @c: constant coefficient.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ * @rg: red green coefficient.
+ * @gb: green blue coefficient.
+ * @rb: red blue coefficient.
+ * @rgb: red blue green coefficient.
+ */
+
+struct drm_msm_pcc_coeff {
+	__u32 c;
+	__u32 r;
+	__u32 g;
+	__u32 b;
+	__u32 rg;
+	__u32 gb;
+	__u32 rb;
+	__u32 rgb;
+};
+
+/**
+ * struct drm_msm_pcc - pcc feature structure
+ * @flags: for customizing operations
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ * @r_rr: second order coefficients
+ * @r_gg: second order coefficients
+ * @r_bb: second order coefficients
+ * @g_rr: second order coefficients
+ * @g_gg: second order coefficients
+ * @g_bb: second order coefficients
+ * @b_rr: second order coefficients
+ * @b_gg: second order coefficients
+ * @b_bb: second order coefficients
+ */
+#define DRM_MSM_PCC3
+struct drm_msm_pcc {
+	__u64 flags;
+	struct drm_msm_pcc_coeff r;
+	struct drm_msm_pcc_coeff g;
+	struct drm_msm_pcc_coeff b;
+	__u32 r_rr;
+	__u32 r_gg;
+	__u32 r_bb;
+	__u32 g_rr;
+	__u32 g_gg;
+	__u32 g_bb;
+	__u32 b_rr;
+	__u32 b_gg;
+	__u32 b_bb;
+};
+
+/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
+ * flags: for customizing vlut operation
+ * val: vLUT values
+ */
+#define PA_VLUT_SIZE 256
+struct drm_msm_pa_vlut {
+	__u64 flags;
+	__u32 val[PA_VLUT_SIZE];
+};
+
+#define PA_HSIC_HUE_ENABLE (1 << 0)
+#define PA_HSIC_SAT_ENABLE (1 << 1)
+#define PA_HSIC_VAL_ENABLE (1 << 2)
+#define PA_HSIC_CONT_ENABLE (1 << 3)
+/**
+ * struct drm_msm_pa_hsic - pa hsic feature structure
+ * @flags: flags for the feature customization, values can be:
+ *         - PA_HSIC_HUE_ENABLE: Enable hue adjustment
+ *         - PA_HSIC_SAT_ENABLE: Enable saturation adjustment
+ *         - PA_HSIC_VAL_ENABLE: Enable value adjustment
+ *         - PA_HSIC_CONT_ENABLE: Enable contrast adjustment
+ *
+ * @hue: hue setting
+ * @saturation: saturation setting
+ * @value: value setting
+ * @contrast: contrast setting
+ */
+#define DRM_MSM_PA_HSIC
+struct drm_msm_pa_hsic {
+	__u64 flags;
+	__u32 hue;
+	__u32 saturation;
+	__u32 value;
+	__u32 contrast;
+};
+
+#define MEMCOL_PROT_HUE (1 << 0)
+#define MEMCOL_PROT_SAT (1 << 1)
+#define MEMCOL_PROT_VAL (1 << 2)
+#define MEMCOL_PROT_CONT (1 << 3)
+#define MEMCOL_PROT_SIXZONE (1 << 4)
+#define MEMCOL_PROT_BLEND (1 << 5)
+/* struct drm_msm_memcol - Memory color feature structure.
+ *                         Skin, sky, foliage features are supported.
+ * @prot_flags: Bit mask for enabling protection feature.
+ * @color_adjust_p0: Adjustment curve.
+ * @color_adjust_p1: Adjustment curve.
+ * @color_adjust_p2: Adjustment curve.
+ * @blend_gain: Blend gain weightage from other PA features.
+ * @sat_hold: Saturation hold value.
+ * @val_hold: Value hold info.
+ * @hue_region: Hue qualifier.
+ * @sat_region: Saturation qualifier.
+ * @val_region: Value qualifier.
+ */
+#define DRM_MSM_MEMCOL
+struct drm_msm_memcol {
+	__u64 prot_flags;
+	__u32 color_adjust_p0;
+	__u32 color_adjust_p1;
+	__u32 color_adjust_p2;
+	__u32 blend_gain;
+	__u32 sat_hold;
+	__u32 val_hold;
+	__u32 hue_region;
+	__u32 sat_region;
+	__u32 val_region;
+};
+
+#define DRM_MSM_SIXZONE
+#define SIXZONE_LUT_SIZE 384
+#define SIXZONE_HUE_ENABLE (1 << 0)
+#define SIXZONE_SAT_ENABLE (1 << 1)
+#define SIXZONE_VAL_ENABLE (1 << 2)
+/* struct drm_msm_sixzone_curve - Sixzone HSV adjustment curve structure.
+ * @p0: Hue adjustment.
+ * @p1: Saturation/Value adjustment.
+ */
+struct drm_msm_sixzone_curve {
+	__u32 p1;
+	__u32 p0;
+};
+
+/* struct drm_msm_sixzone - Sixzone feature structure.
+ * @flags: for feature customization, values can be:
+ *         - SIXZONE_HUE_ENABLE: Enable hue adjustment
+ *         - SIXZONE_SAT_ENABLE: Enable saturation adjustment
+ *         - SIXZONE_VAL_ENABLE: Enable value adjustment
+ * @threshold: threshold qualifier.
+ * @adjust_p0: Adjustment curve.
+ * @adjust_p1: Adjustment curve.
+ * @sat_hold: Saturation hold info.
+ * @val_hold: Value hold info.
+ * @curve: HSV adjustment curve lut.
+ */
+struct drm_msm_sixzone {
+	__u64 flags;
+	__u32 threshold;
+	__u32 adjust_p0;
+	__u32 adjust_p1;
+	__u32 sat_hold;
+	__u32 val_hold;
+	struct drm_msm_sixzone_curve curve[SIXZONE_LUT_SIZE];
+};
+
+#define GAMUT_3D_MODE_17 1
+#define GAMUT_3D_MODE_5 2
+#define GAMUT_3D_MODE_13 3
+
+#define GAMUT_3D_MODE17_TBL_SZ 1229
+#define GAMUT_3D_MODE5_TBL_SZ 32
+#define GAMUT_3D_MODE13_TBL_SZ 550
+#define GAMUT_3D_SCALE_OFF_SZ 16
+#define GAMUT_3D_SCALEB_OFF_SZ 12
+#define GAMUT_3D_TBL_NUM 4
+#define GAMUT_3D_SCALE_OFF_TBL_NUM 3
+#define GAMUT_3D_MAP_EN (1 << 0)
+
+/**
+ * struct drm_msm_3d_col - 3d gamut color component structure
+ * @c0: Holds c0 value
+ * @c2_c1: Holds c2/c1 values
+ */
+struct drm_msm_3d_col {
+	__u32 c2_c1;
+	__u32 c0;
+};
+/**
+ * struct drm_msm_3d_gamut - 3d gamut feature structure
+ * @flags: flags for the feature values are:
+ *         0 - no map
+ *         GAMUT_3D_MAP_EN - enable map
+ * @mode: lut mode can take following values:
+ *        - GAMUT_3D_MODE_17
+ *        - GAMUT_3D_MODE_5
+ *        - GAMUT_3D_MODE_13
+ * @scale_off: Scale offset table
+ * @col: Color component tables
+ */
+struct drm_msm_3d_gamut {
+	__u64 flags;
+	__u32 mode;
+	__u32 scale_off[GAMUT_3D_SCALE_OFF_TBL_NUM][GAMUT_3D_SCALE_OFF_SZ];
+	struct drm_msm_3d_col col[GAMUT_3D_TBL_NUM][GAMUT_3D_MODE17_TBL_SZ];
+};
+
+#define PGC_TBL_LEN 512
+#define PGC_8B_ROUND (1 << 0)
+/**
+ * struct drm_msm_pgc_lut - pgc lut feature structure
+ * @flags: flags for the feature, values can be:
+ *         - PGC_8B_ROUND
+ * @c0: color0 component lut
+ * @c1: color1 component lut
+ * @c2: color2 component lut
+ */
+struct drm_msm_pgc_lut {
+	__u64 flags;
+	__u32 c0[PGC_TBL_LEN];
+	__u32 c1[PGC_TBL_LEN];
+	__u32 c2[PGC_TBL_LEN];
+};
+
+#define IGC_TBL_LEN 256
+#define IGC_DITHER_ENABLE (1 << 0)
+/**
+ * struct drm_msm_igc_lut - igc lut feature structure
+ * @flags: flags for the feature customization, values can be:
+ *             - IGC_DITHER_ENABLE: Enable dither functionality
+ * @c0: color0 component lut
+ * @c1: color1 component lut
+ * @c2: color2 component lut
+ * @strength: dither strength, considered valid when IGC_DITHER_ENABLE
+ *            is set in flags. Strength value based on source bit width.
+ * @c0_last: color0 lut_last component
+ * @c1_last: color1 lut_last component
+ * @c2_last: color2 lut_last component
+ */
+struct drm_msm_igc_lut {
+	__u64 flags;
+	__u32 c0[IGC_TBL_LEN];
+	__u32 c1[IGC_TBL_LEN];
+	__u32 c2[IGC_TBL_LEN];
+	__u32 strength;
+	__u32 c0_last;
+	__u32 c1_last;
+	__u32 c2_last;
+};
+#define LAST_LUT 2
+
+#define HIST_V_SIZE 256
+/**
+ * struct drm_msm_hist - histogram feature structure
+ * @flags: for customizing operations
+ * @data: histogram data
+ */
+struct drm_msm_hist {
+	__u64 flags;
+	__u32 data[HIST_V_SIZE];
+};
+
+#define AD4_LUT_GRP0_SIZE 33
+#define AD4_LUT_GRP1_SIZE 32
+/*
+ * struct drm_msm_ad4_init - ad4 init structure set by user-space client.
+ *                           Init param values can change based on tuning
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_init {
+	__u32 init_param_001[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_002[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_003[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_004[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_005[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_006[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_007[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_008[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_009;
+	__u32 init_param_010;
+	__u32 init_param_011;
+	__u32 init_param_012;
+	__u32 init_param_013;
+	__u32 init_param_014;
+	__u32 init_param_015;
+	__u32 init_param_016;
+	__u32 init_param_017;
+	__u32 init_param_018;
+	__u32 init_param_019;
+	__u32 init_param_020;
+	__u32 init_param_021;
+	__u32 init_param_022;
+	__u32 init_param_023;
+	__u32 init_param_024;
+	__u32 init_param_025;
+	__u32 init_param_026;
+	__u32 init_param_027;
+	__u32 init_param_028;
+	__u32 init_param_029;
+	__u32 init_param_030;
+	__u32 init_param_031;
+	__u32 init_param_032;
+	__u32 init_param_033;
+	__u32 init_param_034;
+	__u32 init_param_035;
+	__u32 init_param_036;
+	__u32 init_param_037;
+	__u32 init_param_038;
+	__u32 init_param_039;
+	__u32 init_param_040;
+	__u32 init_param_041;
+	__u32 init_param_042;
+	__u32 init_param_043;
+	__u32 init_param_044;
+	__u32 init_param_045;
+	__u32 init_param_046;
+	__u32 init_param_047;
+	__u32 init_param_048;
+	__u32 init_param_049;
+	__u32 init_param_050;
+	__u32 init_param_051;
+	__u32 init_param_052;
+	__u32 init_param_053;
+	__u32 init_param_054;
+	__u32 init_param_055;
+	__u32 init_param_056;
+	__u32 init_param_057;
+	__u32 init_param_058;
+	__u32 init_param_059;
+	__u32 init_param_060;
+	__u32 init_param_061;
+	__u32 init_param_062;
+	__u32 init_param_063;
+	__u32 init_param_064;
+	__u32 init_param_065;
+	__u32 init_param_066;
+	__u32 init_param_067;
+	__u32 init_param_068;
+	__u32 init_param_069;
+	__u32 init_param_070;
+	__u32 init_param_071;
+	__u32 init_param_072;
+	__u32 init_param_073;
+	__u32 init_param_074;
+	__u32 init_param_075;
+};
+
+/*
+ * struct drm_msm_ad4_cfg - ad4 config structure set by user-space client.
+ *                           Config param values can vary based on tuning,
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_cfg {
+	__u32 cfg_param_001;
+	__u32 cfg_param_002;
+	__u32 cfg_param_003;
+	__u32 cfg_param_004;
+	__u32 cfg_param_005;
+	__u32 cfg_param_006;
+	__u32 cfg_param_007;
+	__u32 cfg_param_008;
+	__u32 cfg_param_009;
+	__u32 cfg_param_010;
+	__u32 cfg_param_011;
+	__u32 cfg_param_012;
+	__u32 cfg_param_013;
+	__u32 cfg_param_014;
+	__u32 cfg_param_015;
+	__u32 cfg_param_016;
+	__u32 cfg_param_017;
+	__u32 cfg_param_018;
+	__u32 cfg_param_019;
+	__u32 cfg_param_020;
+	__u32 cfg_param_021;
+	__u32 cfg_param_022;
+	__u32 cfg_param_023;
+	__u32 cfg_param_024;
+	__u32 cfg_param_025;
+	__u32 cfg_param_026;
+	__u32 cfg_param_027;
+	__u32 cfg_param_028;
+	__u32 cfg_param_029;
+	__u32 cfg_param_030;
+	__u32 cfg_param_031;
+	__u32 cfg_param_032;
+	__u32 cfg_param_033;
+	__u32 cfg_param_034;
+	__u32 cfg_param_035;
+	__u32 cfg_param_036;
+	__u32 cfg_param_037;
+	__u32 cfg_param_038;
+	__u32 cfg_param_039;
+	__u32 cfg_param_040;
+	__u32 cfg_param_041;
+	__u32 cfg_param_042;
+	__u32 cfg_param_043;
+	__u32 cfg_param_044;
+	__u32 cfg_param_045;
+	__u32 cfg_param_046;
+	__u32 cfg_param_047;
+	__u32 cfg_param_048;
+	__u32 cfg_param_049;
+	__u32 cfg_param_050;
+	__u32 cfg_param_051;
+	__u32 cfg_param_052;
+	__u32 cfg_param_053;
+};
+
+#define DITHER_MATRIX_SZ 16
+
+/**
+ * struct drm_msm_dither - dither feature structure
+ * @flags: for customizing operations
+ * @temporal_en: temporal dither enable
+ * @c0_bitdepth: c0 component bit depth
+ * @c1_bitdepth: c1 component bit depth
+ * @c2_bitdepth: c2 component bit depth
+ * @c3_bitdepth: c3 component bit depth
+ * @matrix: dither strength matrix
+ */
+struct drm_msm_dither {
+	__u64 flags;
+	__u32 temporal_en;
+	__u32 c0_bitdepth;
+	__u32 c1_bitdepth;
+	__u32 c2_bitdepth;
+	__u32 c3_bitdepth;
+	__u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
+ * struct drm_msm_pa_dither - dspp dither feature structure
+ * @flags: for customizing operations
+ * @strength: dither strength
+ * @offset_en: offset enable bit
+ * @matrix: dither data matrix
+ */
+#define DRM_MSM_PA_DITHER
+struct drm_msm_pa_dither {
+	__u64 flags;
+	__u32 strength;
+	__u32 offset_en;
+	__u32 matrix[DITHER_MATRIX_SZ];
+};
+
+/**
+ * struct drm_msm_ad4_roi_cfg - ad4 roi params config set
+ * by user-space client.
+ * @h_x - horizontal direction start
+ * @h_y - horizontal direction end
+ * @v_x - vertical direction start
+ * @v_y - vertical direction end
+ * @factor_in - the alpha value for inside roi region
+ * @factor_out - the alpha value for outside roi region
+ */
+#define DRM_MSM_AD4_ROI
+struct drm_msm_ad4_roi_cfg {
+	__u32 h_x;
+	__u32 h_y;
+	__u32 v_x;
+	__u32 v_y;
+	__u32 factor_in;
+	__u32 factor_out;
+};
+
+#define LTM_FEATURE_DEF 1
+#define LTM_DATA_SIZE_0 32
+#define LTM_DATA_SIZE_1 128
+#define LTM_DATA_SIZE_2 256
+#define LTM_DATA_SIZE_3 33
+#define LTM_BUFFER_SIZE 5
+#define LTM_GUARD_BYTES 255
+#define LTM_BLOCK_SIZE 2
+
+#define LTM_STATS_SAT (1 << 1)
+#define LTM_STATS_MERGE_SAT (1 << 2)
+
+/*
+ * struct drm_msm_ltm_stats_data - LTM stats data structure
+ */
+struct drm_msm_ltm_stats_data {
+	__u32 stats_01[LTM_DATA_SIZE_0][LTM_DATA_SIZE_1];
+	__u32 stats_02[LTM_DATA_SIZE_2];
+	__u32 stats_03[LTM_DATA_SIZE_0];
+	__u32 stats_04[LTM_DATA_SIZE_0];
+	__u32 stats_05[LTM_DATA_SIZE_0];
+	__u32 status_flag;
+	__u32 display_h;
+	__u32 display_v;
+	__u32 init_h[LTM_BLOCK_SIZE];
+	__u32 init_v;
+	__u32 inc_h;
+	__u32 inc_v;
+	__u32 portrait_en;
+	__u32 merge_en;
+	__u32 cfg_param_01;
+	__u32 cfg_param_02;
+	__u32 cfg_param_03;
+	__u32 cfg_param_04;
+};
+
+/*
+ * struct drm_msm_ltm_init_param - LTM init param structure
+ */
+struct drm_msm_ltm_init_param {
+	__u32 init_param_01;
+	__u32 init_param_02;
+	__u32 init_param_03;
+	__u32 init_param_04;
+};
+
+/*
+ * struct drm_msm_ltm_cfg_param - LTM config param structure
+ */
+struct  drm_msm_ltm_cfg_param {
+	__u32 cfg_param_01;
+	__u32 cfg_param_02;
+	__u32 cfg_param_03;
+	__u32 cfg_param_04;
+	__u32 cfg_param_05;
+	__u32 cfg_param_06;
+};
+
+/*
+ * struct drm_msm_ltm_data - LTM data structure
+ */
+struct drm_msm_ltm_data {
+	__u32 data[LTM_DATA_SIZE_0][LTM_DATA_SIZE_3];
+};
+
+/*
+ * struct drm_msm_ltm_buffers_ctrl - LTM buffer control structure.
+ *                                   This struct will be used to init and
+ *                                   de-init the LTM buffers in driver.
+ * @num_of_buffers: valid number of buffers used
+ * @fds: fd array for all the valid buffers
+ */
+struct drm_msm_ltm_buffers_ctrl {
+	__u32 num_of_buffers;
+	__u32 fds[LTM_BUFFER_SIZE];
+};
+
+/*
+ * struct drm_msm_ltm_buffer - LTM buffer structure.
+ *                             This struct will be passed from driver to user
+ *                             space for LTM stats data notification.
+ * @fd: fd associated with the buffer that has LTM stats data
+ * @offset: offset from base address that used for alignment
+ * @status: status flag for error indication
+ */
+struct drm_msm_ltm_buffer {
+	__u32 fd;
+	__u32 offset;
+	__u32 status;
+};
+
+/**
+ * struct drm_msm_ad4_manual_str_cfg - ad4 manual strength config set
+ * by user-space client.
+ * @in_str - strength for inside roi region
+ * @out_str - strength for outside roi region
+ */
+#define DRM_MSM_AD4_MANUAL_STRENGTH
+struct drm_msm_ad4_manual_str_cfg {
+	__u32 in_str;
+	__u32 out_str;
+};
+#endif /* _MSM_DRM_PP_H_ */
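
Note: illustrative userspace sketch, not part of this patch. It shows one plausible way to hand a drm_msm_pa_hsic payload to the driver as a DRM blob property, assuming the HSIC control is exposed as a CRTC property whose id (hsic_prop_id) the caller has already looked up (for example with drmModeObjectGetProperties); the hue/saturation values are placeholders, and the include paths may differ per build setup.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm/drm_mode.h>	/* DRM_MODE_OBJECT_CRTC */
#include <drm/msm_drm_pp.h>

static int example_set_hsic(int drm_fd, uint32_t crtc_id, uint32_t hsic_prop_id)
{
	struct drm_msm_pa_hsic hsic = {
		.flags = PA_HSIC_HUE_ENABLE | PA_HSIC_SAT_ENABLE,
		.hue = 512,		/* placeholder hardware value */
		.saturation = 128,	/* placeholder hardware value */
	};
	uint32_t blob_id = 0;
	int rc;

	/* wrap the payload in a blob, then point the property at it */
	rc = drmModeCreatePropertyBlob(drm_fd, &hsic, sizeof(hsic), &blob_id);
	if (rc)
		return rc;

	rc = drmModeObjectSetProperty(drm_fd, crtc_id, DRM_MODE_OBJECT_CRTC,
				      hsic_prop_id, blob_id);

	drmModeDestroyPropertyBlob(drm_fd, blob_id);
	return rc;
}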

+ 649 - 0
include/uapi/drm/sde_drm.h

@@ -0,0 +1,649 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _SDE_DRM_H_
+#define _SDE_DRM_H_
+
+#include <drm/drm.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Total number of supported color planes */
+#define SDE_MAX_PLANES  4
+
+/* Total number of parameterized detail enhancer mapping curves */
+#define SDE_MAX_DE_CURVES 3
+
+ /* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+#define FILTER_ALPHA_2D			0x3
+
+/* Blend filters */
+#define FILTER_BLEND_CIRCULAR_2D	0x0
+#define FILTER_BLEND_SEPARABLE_1D	0x1
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/**
+ * Blend operations for "blend_op" property
+ *
+ * @SDE_DRM_BLEND_OP_NOT_DEFINED:   No blend operation defined for the layer.
+ * @SDE_DRM_BLEND_OP_OPAQUE:        Apply a constant blend operation. The layer
+ *                                  would appear opaque in case fg plane alpha
+ *                                  is 0xff.
+ * @SDE_DRM_BLEND_OP_PREMULTIPLIED: Apply source over blend rule. Layer already
+ *                                  has alpha pre-multiplication done. If the fg
+ *                                  plane alpha is less than 0xff, apply
+ *                                  modulation as well. This operation is
+ *                                  intended on layers having alpha channel.
+ * @SDE_DRM_BLEND_OP_COVERAGE:      Apply source over blend rule. Layer is not
+ *                                  alpha pre-multiplied. Apply
+ *                                  pre-multiplication. If fg plane alpha is
+ *                                  less than 0xff, apply modulation as well.
+ * @SDE_DRM_BLEND_OP_MAX:           Used to track maximum blend operation
+ *                                  possible by mdp.
+ */
+#define SDE_DRM_BLEND_OP_NOT_DEFINED    0
+#define SDE_DRM_BLEND_OP_OPAQUE         1
+#define SDE_DRM_BLEND_OP_PREMULTIPLIED  2
+#define SDE_DRM_BLEND_OP_COVERAGE       3
+#define SDE_DRM_BLEND_OP_MAX            4
+
+/**
+ * Bit masks for "src_config" property
+ * construct bitmask via (1UL << SDE_DRM_<flag>)
+ */
+#define SDE_DRM_DEINTERLACE         0   /* Specifies interlaced input */
+
+/* DRM bitmasks are restricted to 0..63 */
+#define SDE_DRM_BITMASK_COUNT       64
+
+/**
+ * Framebuffer modes for "fb_translation_mode" PLANE and CONNECTOR property
+ *
+ * @SDE_DRM_FB_NON_SEC:          IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ *                               This is the default mode of all framebuffers.
+ * @SDE_DRM_FB_SEC:              IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               both stage I and stage II translations when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode
+ *                               is non-secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ * @SDE_DRM_FB_SEC_DIR_TRANS:    IOMMU configuration for this framebuffer mode
+ *                               is secure domain and requires
+ *                               only stage II translation when
+ *                               this buffer is accessed by the display HW.
+ */
+
+#define SDE_DRM_FB_NON_SEC              0
+#define SDE_DRM_FB_SEC                  1
+#define SDE_DRM_FB_NON_SEC_DIR_TRANS    2
+#define SDE_DRM_FB_SEC_DIR_TRANS        3
+
+/**
+ * Secure levels for "security_level" CRTC property.
+ *                        CRTC property which specifies what plane types
+ *                        can be attached to this CRTC. Plane component
+ *                        derives the plane type based on the FB_MODE.
+ * @ SDE_DRM_SEC_NON_SEC: Both Secure and non-secure plane types can be
+ *                        attached to this CRTC. This is the default state of
+ *                        the CRTC.
+ * @ SDE_DRM_SEC_ONLY:    Only secure planes can be added to this CRTC. If a
+ *                        CRTC is instructed to be in this mode it follows the
+ *                        platform dependent restrictions.
+ */
+#define SDE_DRM_SEC_NON_SEC            0
+#define SDE_DRM_SEC_ONLY               1
+
+/**
+ * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch:       Number of extra pixels to overfetch from left
+ * @right_ftch:      Number of extra pixels to overfetch from right
+ * @top_ftch:        Number of extra lines to overfetch from top
+ * @btm_ftch:        Number of extra lines to overfetch from bottom
+ * @left_rpt:        Number of extra pixels to repeat from left
+ * @right_rpt:       Number of extra pixels to repeat from right
+ * @top_rpt:         Number of extra lines to repeat from top
+ * @btm_rpt:         Number of extra lines to repeat from bottom
+ */
+struct sde_drm_pix_ext_v1 {
+	/*
+	 * Number of pixels ext in left, right, top and bottom direction
+	 * for all color components.
+	 */
+	int32_t num_ext_pxls_lr[SDE_MAX_PLANES];
+	int32_t num_ext_pxls_tb[SDE_MAX_PLANES];
+
+	/*
+	 * Number of pixels needs to be overfetched in left, right, top
+	 * and bottom directions from source image for scaling.
+	 */
+	int32_t left_ftch[SDE_MAX_PLANES];
+	int32_t right_ftch[SDE_MAX_PLANES];
+	int32_t top_ftch[SDE_MAX_PLANES];
+	int32_t btm_ftch[SDE_MAX_PLANES];
+	/*
+	 * Number of pixels needs to be repeated in left, right, top and
+	 * bottom directions for scaling.
+	 */
+	int32_t left_rpt[SDE_MAX_PLANES];
+	int32_t right_rpt[SDE_MAX_PLANES];
+	int32_t top_rpt[SDE_MAX_PLANES];
+	int32_t btm_rpt[SDE_MAX_PLANES];
+
+};
+
+/**
+ * struct sde_drm_scaler_v1 - version 1 of struct sde_drm_scaler
+ * @lr:            Pixel extension settings for left/right
+ * @tb:            Pixel extension settings for top/bottom
+ * @init_phase_x:  Initial scaler phase values for x
+ * @phase_step_x:  Phase step values for x
+ * @init_phase_y:  Initial scaler phase values for y
+ * @phase_step_y:  Phase step values for y
+ * @horz_filter:   Horizontal filter array
+ * @vert_filter:   Vertical filter array
+ */
+struct sde_drm_scaler_v1 {
+	/*
+	 * Pix ext settings
+	 */
+	struct sde_drm_pix_ext_v1 pe;
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	/*
+	 * Filter type to be used for scaling in horizontal and vertical
+	 * directions
+	 */
+	uint32_t horz_filter[SDE_MAX_PLANES];
+	uint32_t vert_filter[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable:         Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip:           Clip coefficient
+ * @limit:          Detail enhancer limit factor
+ * @thr_quiet:      Quiet zone threshold
+ * @thr_dieout:     Die-out zone threshold
+ * @thr_low:        Linear zone left threshold
+ * @thr_high:       Linear zone right threshold
+ * @prec_shift:     Detail enhancer precision
+ * @adjust_a:       Mapping curves A coefficients
+ * @adjust_b:       Mapping curves B coefficients
+ * @adjust_c:       Mapping curves C coefficients
+ */
+struct sde_drm_de_v1 {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/*
+ * Scaler configuration flags
+ */
+
+/* Disable dynamic expansion */
+#define SDE_DYN_EXP_DISABLE 0x1
+
+#define SDE_DRM_QSEED3LITE
+#define SDE_DRM_QSEED4
+
+/**
+ * struct sde_drm_scaler_v2 - version 2 of struct sde_drm_scaler
+ * @enable:            Scaler enable
+ * @dir_en:            Detail enhancer enable
+ * @pe:                Pixel extension settings
+ * @horz_decimate:     Horizontal decimation factor
+ * @vert_decimate:     Vertical decimation factor
+ * @init_phase_x:      Initial scaler phase values for x
+ * @phase_step_x:      Phase step values for x
+ * @init_phase_y:      Initial scaler phase values for y
+ * @phase_step_y:      Phase step values for y
+ * @preload_x:         Horizontal preload value
+ * @preload_y:         Vertical preload value
+ * @src_width:         Source width
+ * @src_height:        Source height
+ * @dst_width:         Destination width
+ * @dst_height:        Destination height
+ * @y_rgb_filter_cfg:  Y/RGB plane filter configuration
+ * @uv_filter_cfg:     UV plane filter configuration
+ * @alpha_filter_cfg:  Alpha filter configuration
+ * @blend_cfg:         Selection of blend coefficients
+ * @lut_flag:          LUT configuration flags
+ * @dir_lut_idx:       2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx:    UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx:    UV separable LUT index
+ * @de:                Detail enhancer settings
+ * @dir_weight:        Directional Weight
+ * @unsharp_mask_blend: Unsharp Blend Filter Ratio
+ * @de_blend:          Ratio of two unsharp mask filters
+ * @flags:             Scaler configuration flags
+ */
+struct sde_drm_scaler_v2 {
+	/*
+	 * General definitions
+	 */
+	uint32_t enable;
+	uint32_t dir_en;
+
+	/*
+	 * Pix ext settings
+	 */
+	struct sde_drm_pix_ext_v1 pe;
+
+	/*
+	 * Decimation settings
+	 */
+	uint32_t horz_decimate;
+	uint32_t vert_decimate;
+
+	/*
+	 * Phase settings
+	 */
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	uint32_t preload_x[SDE_MAX_PLANES];
+	uint32_t preload_y[SDE_MAX_PLANES];
+	uint32_t src_width[SDE_MAX_PLANES];
+	uint32_t src_height[SDE_MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes*/
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct sde_drm_de_v1 de;
+	uint32_t dir_weight;
+	uint32_t unsharp_mask_blend;
+	uint32_t de_blend;
+	uint32_t flags;
+};
+
+/* Number of dest scalers supported */
+#define SDE_MAX_DS_COUNT 2
+
+/*
+ * Destination scaler flag config
+ */
+#define SDE_DRM_DESTSCALER_ENABLE           0x1
+#define SDE_DRM_DESTSCALER_SCALE_UPDATE     0x2
+#define SDE_DRM_DESTSCALER_ENHANCER_UPDATE  0x4
+#define SDE_DRM_DESTSCALER_PU_ENABLE        0x8
+
+/**
+ * struct sde_drm_dest_scaler_cfg - destination scaler config structure
+ * @flags:      Flag to switch between mode for destination scaler
+ *              refer to destination scaler flag config
+ * @index:      Destination scaler selection index
+ * @lm_width:   Layer mixer width configuration
+ * @lm_height:  Layer mixer height configuration
+ * @scaler_cfg: The scaling parameters for all modes except disable
+ *              Userspace pointer to struct sde_drm_scaler_v2
+ */
+struct sde_drm_dest_scaler_cfg {
+	uint32_t flags;
+	uint32_t index;
+	uint32_t lm_width;
+	uint32_t lm_height;
+	uint64_t scaler_cfg;
+};
+
+/**
+ * struct sde_drm_dest_scaler_data - destination scaler data struct
+ * @num_dest_scaler: Number of dest scalers to be configured
+ * @ds_cfg:          Destination scaler block configuration
+ */
+struct sde_drm_dest_scaler_data {
+	uint32_t num_dest_scaler;
+	struct sde_drm_dest_scaler_cfg ds_cfg[SDE_MAX_DS_COUNT];
+};
+
+/*
+ * Define constants for struct sde_drm_csc
+ */
+#define SDE_CSC_MATRIX_COEFF_SIZE   9
+#define SDE_CSC_CLAMP_SIZE          6
+#define SDE_CSC_BIAS_SIZE           3
+
+/**
+ * struct sde_drm_csc_v1 - version 1 of struct sde_drm_csc
+ * @ctm_coeff:          Matrix coefficients, in S31.32 format
+ * @pre_bias:           Pre-bias array values
+ * @post_bias:          Post-bias array values
+ * @pre_clamp:          Pre-clamp array values
+ * @post_clamp:         Post-clamp array values
+ */
+struct sde_drm_csc_v1 {
+	int64_t ctm_coeff[SDE_CSC_MATRIX_COEFF_SIZE];
+	uint32_t pre_bias[SDE_CSC_BIAS_SIZE];
+	uint32_t post_bias[SDE_CSC_BIAS_SIZE];
+	uint32_t pre_clamp[SDE_CSC_CLAMP_SIZE];
+	uint32_t post_clamp[SDE_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct sde_drm_color - struct to store the color and alpha values
+ * @color_0: Color 0 value
+ * @color_1: Color 1 value
+ * @color_2: Color 2 value
+ * @color_3: Color 3 value
+ */
+struct sde_drm_color {
+	uint32_t color_0;
+	uint32_t color_1;
+	uint32_t color_2;
+	uint32_t color_3;
+};
+
+/* Total number of supported dim layers */
+#define SDE_MAX_DIM_LAYERS 7
+
+/* SDE_DRM_DIM_LAYER_CONFIG_FLAG - flags for Dim Layer */
+/* Color fill inside of the rect, including border */
+#define SDE_DRM_DIM_LAYER_INCLUSIVE     0x1
+/* Color fill outside of the rect, excluding border */
+#define SDE_DRM_DIM_LAYER_EXCLUSIVE     0x2
+
+/**
+ * struct sde_drm_dim_layer - dim layer cfg struct
+ * @flags:         Refer SDE_DRM_DIM_LAYER_CONFIG_FLAG for possible values
+ * @stage:         Blending stage of the dim layer
+ * @color_fill:    Color fill for dim layer
+ * @rect:          Dim layer coordinates
+ */
+struct sde_drm_dim_layer_cfg {
+	uint32_t flags;
+	uint32_t stage;
+	struct sde_drm_color color_fill;
+	struct drm_clip_rect rect;
+};
+
+/**
+ * struct sde_drm_dim_layer_v1 - version 1 of dim layer struct
+ * @num_layers:    Number of Dim Layers
+ * @layer_cfg:     Dim layer user cfgs ptr for the num_layers
+ */
+struct sde_drm_dim_layer_v1 {
+	uint32_t num_layers;
+	struct sde_drm_dim_layer_cfg layer_cfg[SDE_MAX_DIM_LAYERS];
+};
+
+/* Writeback Config version definition */
+#define SDE_DRM_WB_CFG		0x1
+
+/* SDE_DRM_WB_CONFIG_FLAGS - Writeback configuration flags */
+#define SDE_DRM_WB_CFG_FLAGS_CONNECTED	(1<<0)
+
+/**
+ * struct sde_drm_wb_cfg - Writeback configuration structure
+ * @flags:		see SDE_DRM_WB_CONFIG_FLAGS
+ * @connector_id:	writeback connector identifier
+ * @count_modes:	Count of modes in modes_ptr
+ * @modes:		Pointer to struct drm_mode_modeinfo
+ */
+struct sde_drm_wb_cfg {
+	uint32_t flags;
+	uint32_t connector_id;
+	uint32_t count_modes;
+	uint64_t modes;
+};
+
+#define SDE_MAX_ROI_V1	4
+
+/**
+ * struct sde_drm_roi_v1 - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
+ */
+struct sde_drm_roi_v1 {
+	uint32_t num_rects;
+	struct drm_clip_rect roi[SDE_MAX_ROI_V1];
+};
+
+/**
+ * Define extended power modes supported by the SDE connectors.
+ */
+#define SDE_MODE_DPMS_ON	0
+#define SDE_MODE_DPMS_LP1	1
+#define SDE_MODE_DPMS_LP2	2
+#define SDE_MODE_DPMS_STANDBY	3
+#define SDE_MODE_DPMS_SUSPEND	4
+#define SDE_MODE_DPMS_OFF	5
+
+/**
+ * sde recovery events for notifying client
+ */
+#define SDE_RECOVERY_SUCCESS		0
+#define SDE_RECOVERY_CAPTURE		1
+#define SDE_RECOVERY_HARD_RESET		2
+
+/*
+ * Colorimetry Data Block values
+ * These bit nums are defined as per the CTA spec
+ * and indicate the colorspaces supported by the sink
+ */
+#define DRM_EDID_CLRMETRY_xvYCC_601   (1 << 0)
+#define DRM_EDID_CLRMETRY_xvYCC_709   (1 << 1)
+#define DRM_EDID_CLRMETRY_sYCC_601    (1 << 2)
+#define DRM_EDID_CLRMETRY_ADOBE_YCC_601  (1 << 3)
+#define DRM_EDID_CLRMETRY_ADOBE_RGB     (1 << 4)
+#define DRM_EDID_CLRMETRY_BT2020_CYCC (1 << 5)
+#define DRM_EDID_CLRMETRY_BT2020_YCC  (1 << 6)
+#define DRM_EDID_CLRMETRY_BT2020_RGB  (1 << 7)
+#define DRM_EDID_CLRMETRY_DCI_P3      (1 << 15)
+
+/*
+ * HDR Metadata
+ * These are defined as per EDID spec and shall be used by the sink
+ * to set the HDR metadata for playback from userspace.
+ */
+
+#define HDR_PRIMARIES_COUNT   3
+
+/* HDR EOTF */
+#define HDR_EOTF_SDR_LUM_RANGE	0x0
+#define HDR_EOTF_HDR_LUM_RANGE	0x1
+#define HDR_EOTF_SMTPE_ST2084	0x2
+#define HDR_EOTF_HLG		0x3
+
+#define DRM_MSM_EXT_HDR_METADATA
+#define DRM_MSM_EXT_HDR_PLUS_METADATA
+struct drm_msm_ext_hdr_metadata {
+	__u32 hdr_state;        /* HDR state */
+	__u32 eotf;             /* electro optical transfer function */
+	__u32 hdr_supported;    /* HDR supported */
+	__u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */
+	__u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */
+	__u32 white_point_x;    /* white_point_x */
+	__u32 white_point_y;    /* white_point_y */
+	__u32 max_luminance;    /* Max luminance */
+	__u32 min_luminance;    /* Min Luminance */
+	__u32 max_content_light_level; /* max content light level */
+	__u32 max_average_light_level; /* max average light level */
+
+	__u64 hdr_plus_payload;     /* user pointer to dynamic HDR payload */
+	__u32 hdr_plus_payload_size;/* size of dynamic HDR payload data */
+};
+
+/**
+ * HDR sink properties
+ * These are defined as per EDID spec and shall be used by the userspace
+ * to determine the HDR properties to be set to the sink.
+ */
+#define DRM_MSM_EXT_HDR_PROPERTIES
+#define DRM_MSM_EXT_HDR_PLUS_PROPERTIES
+struct drm_msm_ext_hdr_properties {
+	__u8 hdr_metadata_type_one;   /* static metadata type one */
+	__u32 hdr_supported;          /* HDR supported */
+	__u32 hdr_eotf;               /* electro optical transfer function */
+	__u32 hdr_max_luminance;      /* Max luminance */
+	__u32 hdr_avg_luminance;      /* Avg luminance */
+	__u32 hdr_min_luminance;      /* Min Luminance */
+
+	__u32 hdr_plus_supported;     /* HDR10+ supported */
+};
+
+/* HDR WRGB x and y index */
+#define DISPLAY_PRIMARIES_WX 0
+#define DISPLAY_PRIMARIES_WY 1
+#define DISPLAY_PRIMARIES_RX 2
+#define DISPLAY_PRIMARIES_RY 3
+#define DISPLAY_PRIMARIES_GX 4
+#define DISPLAY_PRIMARIES_GY 5
+#define DISPLAY_PRIMARIES_BX 6
+#define DISPLAY_PRIMARIES_BY 7
+#define DISPLAY_PRIMARIES_MAX 8
+
+struct drm_panel_hdr_properties {
+	__u32 hdr_enabled;
+
+	/* WRGB X and y values arrayed in format */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	__u32 display_primaries[DISPLAY_PRIMARIES_MAX];
+
+	/* peak brightness supported by panel */
+	__u32 peak_brightness;
+	/* Blackness level supported by panel */
+	__u32 blackness_level;
+};
+
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. e.g.: for crtc pass crtc id.
+ * @object_type: DRM object type. e.g.: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ *         e.g.: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ *                  notification.
+ * @index: Object index (e.g.: crtc index), optional for user-space to set.
+ *         Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+	__u32 object_id;
+	__u32 object_type;
+	__u32 event;
+	__u64 client_context;
+	__u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ *                            custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Contains information about the DRM object that raised this event.
+ * @data: Custom payload that driver returns for event type.
+ *        size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+	struct drm_event base;
+	struct drm_msm_event_req info;
+	__u8 data[];
+};
+
+/**
+ * struct drm_msm_power_ctrl: Payload to enable/disable the power vote
+ * @enable: enable/disable the power vote
+ * @flags:  operation control flags, for future use
+ */
+struct drm_msm_power_ctrl {
+	__u32 enable;
+	__u32 flags;
+};
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
+#define DRM_MSM_RMFB2                  0x43
+#define DRM_MSM_POWER_CTRL             0x44
+
+/* sde custom events */
+#define DRM_EVENT_HISTOGRAM 0x80000000
+#define DRM_EVENT_AD_BACKLIGHT 0x80000001
+#define DRM_EVENT_CRTC_POWER 0x80000002
+#define DRM_EVENT_SYS_BACKLIGHT 0x80000003
+#define DRM_EVENT_SDE_POWER 0x80000004
+#define DRM_EVENT_IDLE_NOTIFY 0x80000005
+#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */
+#define DRM_EVENT_SDE_HW_RECOVERY 0X80000007
+#define DRM_EVENT_LTM_HIST 0X80000008
+#define DRM_EVENT_LTM_WB_PB 0X80000009
+#define DRM_EVENT_LTM_OFF 0X8000000A
+
+#define DRM_IOCTL_SDE_WB_CONFIG \
+	DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT   DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_RMFB2 DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_RMFB2), unsigned int)
+#define DRM_IOCTL_MSM_POWER_CTRL DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_POWER_CTRL), struct drm_msm_power_ctrl)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _SDE_DRM_H_ */
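
Note: illustrative userspace sketch, not part of this patch. It subscribes to the DRM_EVENT_PANEL_DEAD (ESD) notification on a CRTC through the custom register-event ioctl declared above; the drm fd and crtc id are assumed to be obtained elsewhere, and responses are later read back from the drm fd as struct drm_msm_event_resp.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm_mode.h>	/* DRM_MODE_OBJECT_CRTC */
#include <drm/sde_drm.h>

static int example_register_panel_dead(int drm_fd, uint32_t crtc_id)
{
	struct drm_msm_event_req req = {
		.object_id = crtc_id,
		.object_type = DRM_MODE_OBJECT_CRTC,
		.event = DRM_EVENT_PANEL_DEAD,
	};

	/* notifications arrive as struct drm_msm_event_resp on read() */
	return ioctl(drm_fd, DRM_IOCTL_MSM_REGISTER_EVENT, &req);
}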

+ 3 - 0
include/uapi/media/Kbuild

@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
+
+header-y += msm_sde_rotator.h

+ 122 - 0
include/uapi/media/msm_sde_rotator.h

@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_MSM_SDE_ROTATOR_H__
+#define __UAPI_MSM_SDE_ROTATOR_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* SDE Rotator pixel format definitions */
+#define SDE_PIX_FMT_XRGB_8888		V4L2_PIX_FMT_XBGR32
+#define SDE_PIX_FMT_ARGB_8888		V4L2_PIX_FMT_ABGR32
+#define SDE_PIX_FMT_ABGR_8888		V4L2_PIX_FMT_SDE_ABGR_8888
+#define SDE_PIX_FMT_RGBA_8888		V4L2_PIX_FMT_SDE_RGBA_8888
+#define SDE_PIX_FMT_BGRA_8888		V4L2_PIX_FMT_ARGB32
+#define SDE_PIX_FMT_RGBX_8888		V4L2_PIX_FMT_SDE_RGBX_8888
+#define SDE_PIX_FMT_BGRX_8888		V4L2_PIX_FMT_XRGB32
+#define SDE_PIX_FMT_XBGR_8888		V4L2_PIX_FMT_SDE_XBGR_8888
+#define SDE_PIX_FMT_RGBA_5551		V4L2_PIX_FMT_SDE_RGBA_5551
+#define SDE_PIX_FMT_ARGB_1555		V4L2_PIX_FMT_ARGB555
+#define SDE_PIX_FMT_ABGR_1555		V4L2_PIX_FMT_SDE_ABGR_1555
+#define SDE_PIX_FMT_BGRA_5551		V4L2_PIX_FMT_SDE_BGRA_5551
+#define SDE_PIX_FMT_BGRX_5551		V4L2_PIX_FMT_SDE_BGRX_5551
+#define SDE_PIX_FMT_RGBX_5551		V4L2_PIX_FMT_SDE_RGBX_5551
+#define SDE_PIX_FMT_XBGR_1555		V4L2_PIX_FMT_SDE_XBGR_1555
+#define SDE_PIX_FMT_XRGB_1555		V4L2_PIX_FMT_XRGB555
+#define SDE_PIX_FMT_ARGB_4444		V4L2_PIX_FMT_ARGB444
+#define SDE_PIX_FMT_RGBA_4444		V4L2_PIX_FMT_SDE_RGBA_4444
+#define SDE_PIX_FMT_BGRA_4444		V4L2_PIX_FMT_SDE_BGRA_4444
+#define SDE_PIX_FMT_ABGR_4444		V4L2_PIX_FMT_SDE_ABGR_4444
+#define SDE_PIX_FMT_RGBX_4444		V4L2_PIX_FMT_SDE_RGBX_4444
+#define SDE_PIX_FMT_XRGB_4444		V4L2_PIX_FMT_XRGB444
+#define SDE_PIX_FMT_BGRX_4444		V4L2_PIX_FMT_SDE_BGRX_4444
+#define SDE_PIX_FMT_XBGR_4444		V4L2_PIX_FMT_SDE_XBGR_4444
+#define SDE_PIX_FMT_RGB_888		V4L2_PIX_FMT_RGB24
+#define SDE_PIX_FMT_BGR_888		V4L2_PIX_FMT_BGR24
+#define SDE_PIX_FMT_RGB_565		V4L2_PIX_FMT_RGB565
+#define SDE_PIX_FMT_BGR_565		V4L2_PIX_FMT_SDE_BGR_565
+#define SDE_PIX_FMT_Y_CB_CR_H2V2	V4L2_PIX_FMT_YUV420
+#define SDE_PIX_FMT_Y_CR_CB_H2V2	V4L2_PIX_FMT_YVU420
+#define SDE_PIX_FMT_Y_CR_CB_GH2V2	V4L2_PIX_FMT_SDE_Y_CR_CB_GH2V2
+#define SDE_PIX_FMT_Y_CBCR_H2V2		V4L2_PIX_FMT_NV12
+#define SDE_PIX_FMT_Y_CRCB_H2V2		V4L2_PIX_FMT_NV21
+#define SDE_PIX_FMT_Y_CBCR_H1V2		V4L2_PIX_FMT_SDE_Y_CBCR_H1V2
+#define SDE_PIX_FMT_Y_CRCB_H1V2		V4L2_PIX_FMT_SDE_Y_CRCB_H1V2
+#define SDE_PIX_FMT_Y_CBCR_H2V1		V4L2_PIX_FMT_NV16
+#define SDE_PIX_FMT_Y_CRCB_H2V1		V4L2_PIX_FMT_NV61
+#define SDE_PIX_FMT_YCBYCR_H2V1		V4L2_PIX_FMT_YUYV
+#define SDE_PIX_FMT_Y_CBCR_H2V2_VENUS	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_VENUS
+#define SDE_PIX_FMT_Y_CRCB_H2V2_VENUS	V4L2_PIX_FMT_SDE_Y_CRCB_H2V2_VENUS
+#define SDE_PIX_FMT_RGBA_8888_UBWC	V4L2_PIX_FMT_RGBA8888_UBWC
+#define SDE_PIX_FMT_RGBX_8888_UBWC	V4L2_PIX_FMT_SDE_RGBX_8888_UBWC
+#define SDE_PIX_FMT_RGB_565_UBWC	V4L2_PIX_FMT_SDE_RGB_565_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_UBWC	V4L2_PIX_FMT_NV12_UBWC
+#define SDE_PIX_FMT_RGBA_1010102	V4L2_PIX_FMT_SDE_RGBA_1010102
+#define SDE_PIX_FMT_RGBX_1010102	V4L2_PIX_FMT_SDE_RGBX_1010102
+#define SDE_PIX_FMT_ARGB_2101010	V4L2_PIX_FMT_SDE_ARGB_2101010
+#define SDE_PIX_FMT_XRGB_2101010	V4L2_PIX_FMT_SDE_XRGB_2101010
+#define SDE_PIX_FMT_BGRA_1010102	V4L2_PIX_FMT_SDE_BGRA_1010102
+#define SDE_PIX_FMT_BGRX_1010102	V4L2_PIX_FMT_SDE_BGRX_1010102
+#define SDE_PIX_FMT_ABGR_2101010	V4L2_PIX_FMT_SDE_ABGR_2101010
+#define SDE_PIX_FMT_XBGR_2101010	V4L2_PIX_FMT_SDE_XBGR_2101010
+#define SDE_PIX_FMT_RGBA_1010102_UBWC	V4L2_PIX_FMT_SDE_RGBA_1010102_UBWC
+#define SDE_PIX_FMT_RGBX_1010102_UBWC	V4L2_PIX_FMT_SDE_RGBX_1010102_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS \
+	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC	V4L2_PIX_FMT_NV12_TP10_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC	V4L2_PIX_FMT_NV12_P010_UBWC
+
+/*
+ * struct msm_sde_rotator_fence - v4l2 buffer fence info
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type
+ * @fd: file descriptor of the fence associated with this buffer
+ */
+struct msm_sde_rotator_fence {
+	__u32	index;
+	__u32	type;
+	__s32	fd;
+	__u32	reserved[5];
+};
+
+/*
+ * struct msm_sde_rotator_comp_ratio - v4l2 buffer compression ratio
+ * @index: id number of the buffer
+ * @type: enum v4l2_buf_type; buffer type
+ * @numer: numerator of the ratio
+ * @denom: denominator of the ratio
+ */
+struct msm_sde_rotator_comp_ratio {
+	__u32	index;
+	__u32	type;
+	__u32	numer;
+	__u32	denom;
+	__u32	reserved[4];
+};
+
+/* SDE Rotator private ioctl ID */
+#define VIDIOC_G_SDE_ROTATOR_FENCE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 10, struct msm_sde_rotator_fence)
+#define VIDIOC_S_SDE_ROTATOR_FENCE \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 11, struct msm_sde_rotator_fence)
+#define VIDIOC_G_SDE_ROTATOR_COMP_RATIO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct msm_sde_rotator_comp_ratio)
+#define VIDIOC_S_SDE_ROTATOR_COMP_RATIO \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct msm_sde_rotator_comp_ratio)
+
+/* SDE Rotator private control ID's */
+#define V4L2_CID_SDE_ROTATOR_SECURE	(V4L2_CID_USER_BASE + 0x1000)
+
+/*
+ * This control ID indicates that this context is associated with the
+ * secure camera.
+ */
+#define V4L2_CID_SDE_ROTATOR_SECURE_CAMERA	(V4L2_CID_USER_BASE + 0x2000)
+
+#endif /* __UAPI_MSM_SDE_ROTATOR_H__ */
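
The fence and compression-ratio ioctls above attach per-buffer metadata to the rotator's V4L2 queues. A minimal userspace sketch of reading back the fence for a buffer follows; the buffer type, helper name and installed header path are assumptions.

/*
 * Hypothetical sketch: fetch the fence fd tied to a queued rotator buffer.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <media/msm_sde_rotator.h>   /* assumed install path per the Kbuild entry above */

static int get_rotator_fence(int rot_fd, unsigned int buf_index)
{
	struct msm_sde_rotator_fence fence;

	memset(&fence, 0, sizeof(fence));
	fence.index = buf_index;
	fence.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;  /* assumed queue type */

	if (ioctl(rot_fd, VIDIOC_G_SDE_ROTATOR_FENCE, &fence) < 0)
		return -1;

	return fence.fd;   /* fence associated with this buffer */
}

VIDIOC_S_SDE_ROTATOR_FENCE takes the same structure in the opposite direction, presumably to hand the driver a fence to wait on before it consumes the buffer.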

+ 2 - 2
msm/dp/dp_drm.c

@@ -231,8 +231,8 @@ static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
 }
 
 static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dp_bridge *bridge;
 	struct dp_display *dp;

+ 5 - 7
msm/dp/dp_drm.h

@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 
 #include "msm_drv.h"
 #include "dp_display.h"
@@ -47,7 +46,7 @@ int dp_connector_config_hdr(struct drm_connector *connector,
  */
 int dp_connector_atomic_check(struct drm_connector *connector,
 	void *display,
-	struct drm_connector_state *c_state);
+	struct drm_atomic_state *state);
 
 /**
  * dp_connector_set_colorspace - callback to set new colorspace
@@ -184,15 +183,14 @@ static inline int dp_connector_config_hdr(struct drm_connector *connector,
 	return 0;
 }
 
-int dp_connector_atomic_check(struct drm_connector *connector,
-	void *display,
-	struct drm_connector_state *c_state)
+static inline int dp_connector_atomic_check(struct drm_connector *connector,
+		void *display, struct drm_atomic_state *state)
 {
 	return 0;
 }
 
-int dp_connector_set_colorspace(struct drm_connector *connector,
-	void *display)
+static inline int dp_connector_set_colorspace(struct drm_connector *connector,
+		void *display)
 {
 	return 0;
 }

+ 2 - 2
msm/dp/dp_mst_drm.c

@@ -1025,8 +1025,8 @@ static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge)
 }
 
 static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dp_mst_bridge *bridge;
 	struct dp_display *dp;

+ 0 - 2
msm/dsi/dsi_clk.h

@@ -178,7 +178,6 @@ typedef int (*pre_clockon_cb)(void *priv,
  * @c_clks[MAX_DSI_CTRL]     array of core clock configurations
  * @l_lp_clks[MAX_DSI_CTRL]  array of low power(esc) clock configurations
  * @l_hs_clks[MAX_DSI_CTRL]  array of high speed clock configurations
- * @bus_handle[MAX_DSI_CTRL] array of bus handles
  * @ctrl_index[MAX_DSI_CTRL] array of DSI controller indexes mapped
  *                           to core and link clock configurations
  * @pre_clkoff_cb            callback before clock is turned off
@@ -194,7 +193,6 @@ struct dsi_clk_info {
 	struct dsi_core_clk_info c_clks[MAX_DSI_CTRL];
 	struct dsi_link_lp_clk_info l_lp_clks[MAX_DSI_CTRL];
 	struct dsi_link_hs_clk_info l_hs_clks[MAX_DSI_CTRL];
-	u32 bus_handle[MAX_DSI_CTRL];
 	u32 ctrl_index[MAX_DSI_CTRL];
 	pre_clockoff_cb pre_clkoff_cb;
 	post_clockoff_cb post_clkoff_cb;

+ 0 - 21
msm/dsi/dsi_clk_manager.c

@@ -6,14 +6,12 @@
 #include <linux/of.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/msm-bus.h>
 #include <linux/pm_runtime.h>
 #include "dsi_clk.h"
 #include "dsi_defs.h"
 
 struct dsi_core_clks {
 	struct dsi_core_clk_info clks;
-	u32 bus_handle;
 };
 
 struct dsi_link_clks {
@@ -265,19 +263,9 @@ int dsi_core_clk_start(struct dsi_core_clks *c_clks)
 		}
 	}
 
-	if (c_clks->bus_handle) {
-		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
-		if (rc) {
-			DSI_ERR("bus scale client enable failed, rc=%d\n", rc);
-			goto error_disable_mmss_clk;
-		}
-	}
 
 	return rc;
 
-error_disable_mmss_clk:
-	if (c_clks->clks.core_mmss_clk)
-		clk_disable_unprepare(c_clks->clks.core_mmss_clk);
 error_disable_bus_clk:
 	if (c_clks->clks.bus_clk)
 		clk_disable_unprepare(c_clks->clks.bus_clk);
@@ -298,14 +286,6 @@ int dsi_core_clk_stop(struct dsi_core_clks *c_clks)
 {
 	int rc = 0;
 
-	if (c_clks->bus_handle) {
-		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 0);
-		if (rc) {
-			DSI_ERR("bus scale client disable failed, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
 	if (c_clks->clks.core_mmss_clk)
 		clk_disable_unprepare(c_clks->clks.core_mmss_clk);
 
@@ -1446,7 +1426,6 @@ void *dsi_display_clk_mngr_register(struct dsi_clk_info *info)
 			sizeof(struct dsi_link_hs_clk_info));
 		memcpy(&mngr->link_clks[i].lp_clks, &info->l_lp_clks[i],
 			sizeof(struct dsi_link_lp_clk_info));
-		mngr->core_clks[i].bus_handle = info->bus_handle[i];
 		mngr->ctrl_index[i] = info->ctrl_index[i];
 	}
 

+ 0 - 47
msm/dsi/dsi_ctrl.c

@@ -7,7 +7,6 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/clk.h>
-#include <linux/msm-bus.h>
 #include <linux/of_irq.h>
 #include <video/mipi_display.h>
 
@@ -812,43 +811,6 @@ error_digital:
 	return rc;
 }
 
-static int dsi_ctrl_axi_bus_client_init(struct platform_device *pdev,
-					struct dsi_ctrl *ctrl)
-{
-	int rc = 0;
-	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
-
-	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
-	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
-		rc = PTR_ERR(bus->bus_scale_table);
-		DSI_CTRL_DEBUG(ctrl, "msm_bus_cl_get_pdata() failed, rc = %d\n",
-				rc);
-		bus->bus_scale_table = NULL;
-		return rc;
-	}
-
-	bus->bus_handle = msm_bus_scale_register_client(bus->bus_scale_table);
-	if (!bus->bus_handle) {
-		rc = -EINVAL;
-		DSI_CTRL_ERR(ctrl, "failed to register axi bus client\n");
-	}
-
-	return rc;
-}
-
-static int dsi_ctrl_axi_bus_client_deinit(struct dsi_ctrl *ctrl)
-{
-	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
-
-	if (bus->bus_handle) {
-		msm_bus_scale_unregister_client(bus->bus_handle);
-
-		bus->bus_handle = 0;
-	}
-
-	return 0;
-}
-
 static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl,
 					struct dsi_host_config *config)
 {
@@ -1928,11 +1890,6 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev)
 		goto fail_supplies;
 	}
 
-	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
-	if (rc)
-		DSI_CTRL_DEBUG(dsi_ctrl, "failed to init axi bus client, rc = %d\n",
-				rc);
-
 	item->ctrl = dsi_ctrl;
 
 	mutex_lock(&dsi_ctrl_list_lock);
@@ -1977,10 +1934,6 @@ static int dsi_ctrl_dev_remove(struct platform_device *pdev)
 	mutex_unlock(&dsi_ctrl_list_lock);
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
-	rc = dsi_ctrl_axi_bus_client_deinit(dsi_ctrl);
-	if (rc)
-		DSI_CTRL_ERR(dsi_ctrl, "failed to deinitialize axi bus client, rc = %d\n",
-				rc);
 
 	rc = dsi_ctrl_supplies_deinit(dsi_ctrl);
 	if (rc)

+ 0 - 12
msm/dsi/dsi_ctrl.h

@@ -136,16 +136,6 @@ struct dsi_ctrl_clk_info {
 	struct dsi_clk_link_set shadow_clks;
 };
 
-/**
- * struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
- * @bus_scale_table:        Bus scale voting usecases.
- * @bus_handle:             Handle used for voting bandwidth.
- */
-struct dsi_ctrl_bus_scale_info {
-	struct msm_bus_scale_pdata *bus_scale_table;
-	u32 bus_handle;
-};
-
 /**
  * struct dsi_ctrl_state_info - current driver state information
  * @power_state:        Status of power states on DSI controller.
@@ -208,7 +198,6 @@ struct dsi_ctrl_interrupts {
  * @clk_info:            Clock information.
  * @clk_freq:            DSi Link clock frequency information.
  * @pwr_info:            Power information.
- * @axi_bus_info:        AXI bus information.
  * @host_config:         Current host configuration.
  * @mode_bounds:         Boundaries of the default mode ROI.
  *                       Origin is at top left of all CTRLs.
@@ -263,7 +252,6 @@ struct dsi_ctrl {
 	struct dsi_ctrl_clk_info clk_info;
 	struct link_clk_freq clk_freq;
 	struct dsi_ctrl_power_info pwr_info;
-	struct dsi_ctrl_bus_scale_info axi_bus_info;
 
 	struct dsi_host_config host_config;
 	struct dsi_rect mode_bounds;

+ 8 - 7
msm/dsi/dsi_display.c

@@ -1423,7 +1423,7 @@ static ssize_t debugfs_read_esd_check_mode(struct file *file,
 	struct drm_panel_esd_config *esd_config;
 	char *buf;
 	int rc = 0;
-	size_t len;
+	size_t len = 0;
 
 	if (!display)
 		return -ENODEV;
@@ -4874,8 +4874,6 @@ static int dsi_display_bind(struct device *dev,
 				sizeof(struct dsi_link_lp_clk_info));
 
 		info.c_clks[i].drm = drm;
-		info.bus_handle[i] =
-			display_ctrl->ctrl->axi_bus_info.bus_handle;
 		info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
 	}
 
@@ -5382,12 +5380,15 @@ static enum drm_mode_status dsi_display_drm_ext_mode_valid(
 
 static int dsi_display_drm_ext_atomic_check(struct drm_connector *connector,
 		void *disp,
-		struct drm_connector_state *c_state)
+		struct drm_atomic_state *state)
 {
 	struct dsi_display *display = disp;
+	struct drm_connector_state *c_state;
+
+	c_state = drm_atomic_get_new_connector_state(state, connector);
 
 	return display->ext_conn->helper_private->atomic_check(
-			display->ext_conn, c_state);
+			display->ext_conn, state);
 }
 
 static int dsi_display_ext_get_info(struct drm_connector *connector,
@@ -5538,8 +5539,8 @@ static bool dsi_display_drm_ext_bridge_mode_fixup(
 
 static void dsi_display_drm_ext_bridge_mode_set(
 		struct drm_bridge *bridge,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
+		const struct drm_display_mode *mode,
+		const struct drm_display_mode *adjusted_mode)
 {
 	struct dsi_display_ext_bridge *ext_bridge;
 	struct drm_display_mode tmp;

+ 2 - 2
msm/dsi/dsi_drm.c

@@ -314,8 +314,8 @@ static void dsi_bridge_post_disable(struct drm_bridge *bridge)
 }
 
 static void dsi_bridge_mode_set(struct drm_bridge *bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
 

+ 0 - 1
msm/dsi/dsi_drm.h

@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 
 #include "msm_drv.h"
 

+ 0 - 1
msm/dsi/dsi_phy.c

@@ -7,7 +7,6 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/clk.h>
-#include <linux/msm-bus.h>
 #include <linux/list.h>
 
 #include "msm_drv.h"

+ 10 - 3
msm/msm_atomic.c

@@ -21,6 +21,7 @@
 #include "msm_gem.h"
 #include "msm_kms.h"
 #include "sde_trace.h"
+#include <drm/drm_atomic_uapi.h>
 
 #define MULTIPLE_CONN_DETECTED(x) (x > 1)
 
@@ -512,7 +513,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 
 	obj = msm_framebuffer_bo(new_state->fb, 0);
 	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = dma_resv_get_excl_rcu(msm_obj->resv);
 
 	drm_atomic_set_fence_for_plane(new_state, fence);
 
@@ -715,7 +716,7 @@ int msm_atomic_commit(struct drm_device *dev,
 				msm_framebuffer_bo(new_plane_state->fb, 0);
 			struct msm_gem_object *msm_obj = to_msm_bo(obj);
 			struct dma_fence *fence =
-				reservation_object_get_excl_rcu(msm_obj->resv);
+				dma_resv_get_excl_rcu(msm_obj->resv);
 
 			drm_atomic_set_fence_for_plane(new_plane_state, fence);
 		}
@@ -826,7 +827,13 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	msm_atomic_wait_for_commit_done(dev, state);
+	if (kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, state);
+	}
+
+	if (!state->legacy_cursor_update)
+		msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
 

+ 43 - 137
msm/msm_drv.c

@@ -41,8 +41,10 @@
 #include <linux/kthread.h>
 #include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
 #include "sde_wb.h"
@@ -56,9 +58,11 @@
  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
  *           MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ *           GEM object's debug name
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	3
+#define MSM_VERSION_MINOR	4
 #define MSM_VERSION_PATCHLEVEL	0
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -349,6 +353,13 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_kms *kms = priv->kms;
 	int i;
 
+	/* We must cancel and cleanup any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
@@ -377,15 +388,13 @@ static int msm_drm_uninit(struct device *dev)
 	if (fbdev && priv->fbdev)
 		msm_fbdev_free(ddev);
 #endif
+	drm_atomic_helper_shutdown(ddev);
 	drm_mode_config_cleanup(ddev);
 
 	pm_runtime_get_sync(dev);
 	drm_irq_uninstall(ddev);
 	pm_runtime_put_sync(dev);
 
-	flush_workqueue(priv->wq);
-	destroy_workqueue(priv->wq);
-
 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
@@ -702,9 +711,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	struct drm_crtc *crtc;
 
 	ddev = drm_dev_alloc(drv, dev);
-	if (!ddev) {
+	if (IS_ERR(ddev)) {
 		dev_err(dev, "failed to allocate drm_device\n");
-		return -ENOMEM;
+		return PTR_ERR(ddev);
 	}
 
 	drm_mode_config_init(ddev);
@@ -804,6 +813,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 		priv->fbdev = msm_fbdev_init(ddev);
 #endif
 
+	/* create drm client only when fbdev is not supported */
+	if (!priv->fbdev) {
+		ret = drm_client_init(ddev, &kms->client, "kms_client", NULL);
+		if (ret) {
+			DRM_ERROR("failed to init kms_client: %d\n", ret);
+			kms->client.dev = NULL;
+			goto fail;
+		}
+
+		drm_client_register(&kms->client);
+	}
+
 	priv->debug_root = debugfs_create_dir("debug",
 					ddev->primary->debugfs_root);
 	if (IS_ERR_OR_NULL(priv->debug_root)) {
@@ -886,15 +907,6 @@ static void context_close(struct msm_file_private *ctx)
 	kfree(ctx);
 }
 
-static void msm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_kms *kms = priv->kms;
-
-	if (kms && kms->funcs && kms->funcs->preclose)
-		kms->funcs->preclose(kms, file);
-}
-
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -919,101 +931,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 	context_close(ctx);
 }
 
-static int msm_disable_all_modes_commit(
-		struct drm_device *dev,
-		struct drm_atomic_state *state)
-{
-	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	unsigned int plane_mask;
-	int ret;
-
-	plane_mask = 0;
-	drm_for_each_plane(plane, dev) {
-		struct drm_plane_state *plane_state;
-
-		plane_state = drm_atomic_get_plane_state(state, plane);
-		if (IS_ERR(plane_state)) {
-			ret = PTR_ERR(plane_state);
-			goto fail;
-		}
-
-		plane_state->rotation = 0;
-
-		plane->old_fb = plane->fb;
-		plane_mask |= 1 << drm_plane_index(plane);
-
-		/* disable non-primary: */
-		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
-			continue;
-
-		DRM_DEBUG("disabling plane %d\n", plane->base.id);
-
-		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
-		if (ret != 0)
-			DRM_ERROR("error %d disabling plane %d\n", ret,
-					plane->base.id);
-	}
-
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_mode_set mode_set;
-
-		memset(&mode_set, 0, sizeof(struct drm_mode_set));
-		mode_set.crtc = crtc;
-
-		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
-
-		ret = __drm_atomic_helper_set_config(&mode_set, state);
-		if (ret != 0)
-			DRM_ERROR("error %d disabling crtc %d\n", ret,
-					crtc->base.id);
-	}
-
-	DRM_DEBUG("committing disables\n");
-	ret = drm_atomic_commit(state);
-
-fail:
-	DRM_DEBUG("disables result %d\n", ret);
-	return ret;
-}
-
-/**
- * msm_clear_all_modes - disables all planes and crtcs via an atomic commit
- *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
- * @dev: device pointer
- * @Return: 0 on success, otherwise -error
- */
-static int msm_disable_all_modes(
-		struct drm_device *dev,
-		struct drm_modeset_acquire_ctx *ctx)
-{
-	struct drm_atomic_state *state;
-	int ret, i;
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return -ENOMEM;
-
-	state->acquire_ctx = ctx;
-
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		ret = msm_disable_all_modes_commit(dev, state);
-		if (ret != -EDEADLK || ret != -ERESTARTSYS)
-			break;
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(ctx);
-	}
-
-	drm_atomic_state_put(state);
-
-	return ret;
-}
-
 static void msm_lastclose(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
-	struct drm_modeset_acquire_ctx ctx;
 	int i, rc;
 
 	/* check for splash status before triggering cleanup
@@ -1039,32 +960,17 @@ static void msm_lastclose(struct drm_device *dev)
 	flush_workqueue(priv->wq);
 
 	if (priv->fbdev) {
-		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-		return;
+		rc = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+		if (rc)
+			DRM_ERROR("restore FBDEV mode failed: %d\n", rc);
+	} else if (kms->client.dev) {
+		rc = drm_client_modeset_commit_force(&kms->client);
+		if (rc)
+			DRM_ERROR("client modeset commit failed: %d\n", rc);
 	}
 
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	rc = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (rc)
-		goto fail;
-
-	rc = msm_disable_all_modes(dev, &ctx);
-	if (rc)
-		goto fail;
-
 	if (kms && kms->funcs && kms->funcs->lastclose)
-		kms->funcs->lastclose(kms, &ctx);
-
-fail:
-	if (rc == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (rc) {
-		pr_err("last close failed: %d\n", rc);
-	}
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
+		kms->funcs->lastclose(kms);
 }
 
 static irqreturn_t msm_irq(int irq, void *arg)
@@ -1089,7 +995,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	BUG_ON(!kms);
-	return kms->funcs->irq_postinstall(kms);
+
+	if (kms->funcs->irq_postinstall)
+		return kms->funcs->irq_postinstall(kms);
+
+	return 0;
 }
 
 static void msm_irq_uninstall(struct drm_device *dev)
@@ -1135,7 +1045,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
 	}
 
 	return msm_gem_new_handle(dev, file, args->size,
-			args->flags, &args->handle);
+			args->flags, &args->handle, NULL);
 }
 
 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -1640,14 +1550,11 @@ static const struct file_operations fops = {
 };
 
 static struct drm_driver msm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ |
-				DRIVER_GEM |
-				DRIVER_PRIME |
+	.driver_features    = DRIVER_GEM |
 				DRIVER_RENDER |
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
 	.open               = msm_open,
-	.preclose           = msm_preclose,
 	.postclose          = msm_postclose,
 	.lastclose          = msm_lastclose,
 	.irq_handler        = msm_irq,
@@ -1664,7 +1571,6 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = msm_gem_prime_import,
-	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -2074,7 +1980,7 @@ static void __exit msm_drm_unregister(void)
 	msm_smmu_driver_cleanup();
 }
 
-module_init(msm_drm_register);
+late_initcall(msm_drm_register);
 module_exit(msm_drm_unregister);
 
 MODULE_AUTHOR("Rob Clark <[email protected]>");

+ 15 - 4
msm/msm_drv.h

@@ -34,16 +34,16 @@
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
 #include <linux/sde_io_util.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <linux/kthread.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/msm_drm.h>
+#include <drm/sde_drm.h>
 #include <drm/drm_gem.h>
 
 #include "sde_power_handle.h"
@@ -736,6 +736,8 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		unsigned int flags);
@@ -821,8 +823,12 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -836,7 +842,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -851,7 +856,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle);
+		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -864,6 +869,10 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
+
+__printf(2, 3)
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
 int msm_gem_delayed_import(struct drm_gem_object *obj);
 
 void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
@@ -974,12 +983,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}

+ 20 - 15
msm/msm_fb.c

@@ -19,8 +19,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-buf.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
@@ -40,6 +41,7 @@ struct msm_framebuffer {
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 	.destroy = drm_gem_fb_destroy,
+	.dirty = drm_atomic_helper_dirtyfb,
 };
 
 #ifdef CONFIG_DEBUG_FS
@@ -283,9 +285,11 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev,
+								mode_cmd);
 	struct drm_gem_object *bos[4] = {0};
 	struct drm_framebuffer *fb;
-	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+	int ret, i, n = info->num_planes;
 
 	for (i = 0; i < n; i++) {
 		bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
@@ -310,24 +314,24 @@ out_unref:
 }
 
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+		const struct drm_mode_fb_cmd2 *mode_cmd,
+		struct drm_gem_object **bos)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev,
+								mode_cmd);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_framebuffer *msm_fb = NULL;
 	struct drm_framebuffer *fb;
 	const struct msm_format *format;
 	int ret, i, num_planes;
-	unsigned int hsub, vsub;
 	bool is_modified = false;
 
 	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
-	num_planes = drm_format_num_planes(mode_cmd->pixel_format);
-	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+	num_planes = info->num_planes;
 
 	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
 			mode_cmd->modifier[0]);
@@ -370,7 +374,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 			goto fail;
 		} else {
 			ret = kms->funcs->check_modified_format(
-				kms, msm_fb->format, mode_cmd, bos);
+					kms, msm_fb->format, mode_cmd, bos);
 			if (ret)
 				goto fail;
 		}
@@ -384,16 +388,15 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		}
 
 		for (i = 0; i < num_planes; i++) {
-			unsigned int width = mode_cmd->width / (i ? hsub : 1);
-			unsigned int height = mode_cmd->height / (i ? vsub : 1);
+			unsigned int width = mode_cmd->width / (i ?
+					info->hsub : 1);
+			unsigned int height = mode_cmd->height / (i ?
+					info->vsub : 1);
 			unsigned int min_size;
-			unsigned int cpp = 0;
-
-			cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
 
 			min_size = (height - 1) * mode_cmd->pitches[i]
-				 + width * cpp
-				 + mode_cmd->offsets[i];
+				+ width * info->cpp[i]
+				+ mode_cmd->offsets[i];
 
 			if (!bos[i] || bos[i]->size < min_size) {
 				ret = -EINVAL;
@@ -450,6 +453,8 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
 		return ERR_CAST(bo);
 	}
 
+	msm_gem_object_set_name(bo, "stolenfb");
+
 	fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
 	if (IS_ERR(fb)) {
 		dev_err(dev->dev, "failed to allocate fb\n");

+ 153 - 32
msm/msm_gem.c

@@ -109,13 +109,12 @@ static struct page **get_pages(struct drm_gem_object *obj)
 			return ptr;
 		}
 
-		/*
-		 * Make sure to flush the CPU cache for newly allocated memory
-		 * so we don't get ourselves into trouble with a dirty cache
+		/* For non-cached buffers, ensure the new pages are clean
+		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
 			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
-			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
+			dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
 				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 		}
 	}
@@ -349,6 +348,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 		return ERR_PTR(-ENOMEM);
 
 	vma->aspace = aspace;
+	msm_obj->aspace = aspace;
 
 	list_add_tail(&vma->list, &msm_obj->vmas);
 
@@ -403,19 +403,14 @@ put_iova(struct drm_gem_object *obj)
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
 
@@ -468,6 +463,64 @@ unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+			obj->size >> PAGE_SHIFT, msm_obj->flags);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
+
+	mutex_unlock(&msm_obj->lock);
+	return ret;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova);
+	mutex_unlock(&msm_obj->lock);
+
+	return ret;
+}
 
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
@@ -486,6 +539,27 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 	return vma ? vma->iova : 0;
 }
 
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+
+	if (!WARN_ON(!vma))
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
+				msm_obj->flags);
+
+	mutex_unlock(&msm_obj->lock);
+}
+
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
@@ -560,7 +634,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size  = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -766,7 +840,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -789,7 +863,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
 	if (!dma_fence_is_signaled(fence))
-		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
 				fence->seqno);
@@ -798,8 +872,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = msm_obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = msm_obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -825,11 +899,19 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	/* FIXME: we need to print the address space here too */
-	list_for_each_entry(vma, &msm_obj->vmas, list)
-		seq_printf(m, " %08llx", vma->iova);
+	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+	if (!list_empty(&msm_obj->vmas)) {
+
+		seq_puts(m, "      vmas:");
 
-	seq_printf(m, " %zu%s\n", obj->size, madv);
+		list_for_each_entry(vma, &msm_obj->vmas, list)
+			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+				vma->iova, vma->mapped ? "mapped" : "unmapped",
+				vma->inuse);
+
+		seq_puts(m, "\n");
+	}
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -856,9 +938,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 	int count = 0;
 	size_t size = 0;
 
+	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 	list_for_each_entry(msm_obj, list, mm_list) {
 		struct drm_gem_object *obj = &msm_obj->base;
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
 		count++;
 		size += obj->size;
@@ -908,7 +991,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	}
 
 	if (msm_obj->resv == &msm_obj->_resv)
-		reservation_object_fini(msm_obj->resv);
+		dma_resv_fini(msm_obj->resv);
 
 	drm_gem_object_release(obj);
 
@@ -918,7 +1001,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle)
+		uint32_t size, uint32_t flags, uint32_t *handle,
+		char *name)
 {
 	struct drm_gem_object *obj;
 	int ret;
@@ -928,6 +1012,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (name)
+		msm_gem_object_set_name(obj, "%s", name);
+
 	ret = drm_gem_handle_create(file, obj, handle);
 
 	/* drop reference from allocate - handle holds it now */
@@ -938,7 +1025,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
-		struct reservation_object *resv,
+		struct dma_resv *resv,
 		struct drm_gem_object **obj,
 		bool struct_mutex_locked)
 {
@@ -969,7 +1056,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		msm_obj->resv = resv;
 	} else {
 		msm_obj->resv = &msm_obj->_resv;
-		reservation_object_init(msm_obj->resv);
+		dma_resv_init(msm_obj->resv);
 	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
@@ -1004,7 +1091,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
 	if (!iommu_present(&platform_bus_type))
 		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
 		use_vram = true;
 
 	if (WARN_ON(use_vram && !priv->vram.size))
@@ -1190,23 +1277,29 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 
 	if (iova) {
 		ret = msm_gem_get_iova(obj, aspace, iova);
-		if (ret) {
-			drm_gem_object_put(obj);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto err;
 	}
 
 	vaddr = msm_gem_get_vaddr(obj);
 	if (IS_ERR(vaddr)) {
 		msm_gem_put_iova(obj, aspace);
-		drm_gem_object_put(obj);
-		return ERR_CAST(vaddr);
+		ret = PTR_ERR(vaddr);
+		goto err;
 	}
 
 	if (bo)
 		*bo = obj;
 
 	return vaddr;
+err:
+	if (locked)
+		drm_gem_object_put(obj);
+	else
+		drm_gem_object_put_unlocked(obj);
+
+	return ERR_PTR(ret);
+
 }
 
 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1222,3 +1315,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 {
 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+		struct msm_gem_address_space *aspace, bool locked)
+{
+	if (IS_ERR_OR_NULL(bo))
+		return;
+
+	msm_gem_put_vaddr(bo);
+	msm_gem_unpin_iova(bo, aspace);
+
+	if (locked)
+		drm_gem_object_put(bo);
+	else
+		drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(bo);
+	va_list ap;
+
+	if (!fmt)
+		return;
+
+	va_start(ap, fmt);
+	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+	va_end(ap);
+}
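
The helpers added above (msm_gem_object_set_name, msm_gem_get_and_pin_iova/msm_gem_unpin_iova and msm_gem_kernel_put) pair buffer naming with pinned-iova lifetime management. A minimal kernel-side sketch of the intended allocate/name/release flow, assuming a valid drm_device and address space; the function and buffer names are illustrative only.

/*
 * Hypothetical sketch: allocate a small write-combined buffer, name it for
 * the debugfs dump, then release it with the matching put helper.
 */
static int scratch_buffer_demo(struct drm_device *dev,
		struct msm_gem_address_space *aspace)
{
	struct drm_gem_object *bo = NULL;
	uint64_t iova = 0;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	msm_gem_object_set_name(bo, "scratch");  /* shown in the debugfs object list */

	/* ... CPU access via vaddr, device access via iova ... */

	/* vunmap, unpin the iova and drop the reference in one call;
	 * 'false' means struct_mutex is not held by the caller.
	 */
	msm_gem_kernel_put(bo, aspace, false);
	return 0;
}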

+ 7 - 3
msm/msm_gem.h

@@ -19,7 +19,7 @@
 #define __MSM_GEM_H__
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
@@ -82,6 +82,8 @@ struct msm_gem_vma {
 	uint64_t iova;
 	struct msm_gem_address_space *aspace;
 	struct list_head list;    /* node in msm_gem_object::vmas */
+	bool mapped;
+	int inuse;
 };
 
 struct msm_gem_object {
@@ -124,8 +126,8 @@ struct msm_gem_object {
 	struct list_head vmas;    /* list of msm_gem_vma */
 
 	/* normally (resv == &_resv) except for imported bo's */
-	struct reservation_object *resv;
-	struct reservation_object _resv;
+	struct dma_resv *resv;
+	struct dma_resv _resv;
 
 	/* For physically contiguous buffers.  Used when we don't have
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
@@ -136,6 +138,7 @@ struct msm_gem_object {
 
 	struct msm_gem_address_space *aspace;
 	bool in_active_list;
+	char name[32]; /* Identifier to print for the debugfs files */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -195,6 +198,7 @@ struct msm_gem_submit {
 	struct msm_ringbuffer *ring;
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
+	u32 ident;	   /* An "identifier" for the submit for logging */
 	struct {
 		uint32_t type;
 		uint32_t size;  /* in dwords */

+ 1 - 1
msm/msm_gem_prime.c

@@ -76,7 +76,7 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 		msm_gem_put_pages(obj);
 }
 
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+struct dma_resv *msm_gem_prime_res_obj(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 

+ 4 - 5
msm/msm_gem_vma.c

@@ -197,9 +197,9 @@ msm_gem_address_space_destroy(struct kref *kref)
 	struct msm_gem_address_space *aspace = container_of(kref,
 			struct msm_gem_address_space, kref);
 
-	if (aspace && aspace->ops->destroy)
-		aspace->ops->destroy(aspace);
-
+	drm_mm_takedown(&aspace->mm);
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
 	kfree(aspace);
 }
 
@@ -232,8 +232,7 @@ static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
 	msm_gem_address_space_put(aspace);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		unsigned int flags)
 {

+ 4 - 3
msm/msm_kms.h

@@ -93,10 +93,8 @@ struct msm_kms_funcs {
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
 	void (*postopen)(struct msm_kms *kms, struct drm_file *file);
-	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
 	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
-	void (*lastclose)(struct msm_kms *kms,
-			struct drm_modeset_acquire_ctx *ctx);
+	void (*lastclose)(struct msm_kms *kms);
 	int (*register_events)(struct msm_kms *kms,
 			struct drm_mode_object *obj, u32 event, bool en);
 	void (*set_encoder_mode)(struct msm_kms *kms,
@@ -136,6 +134,9 @@ struct msm_kms {
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	struct msm_gem_address_space *aspace;
+
+	/* DRM client used for lastclose cleanup */
+	struct drm_client_dev client;
 };
 
 /**

+ 0 - 1
msm/msm_smmu.c

@@ -21,7 +21,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/msm_dma_iommu_mapping.h>
 
-#include <asm/dma-iommu.h>
 #include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"

+ 8 - 3
msm/sde/sde_connector.c

@@ -16,6 +16,7 @@
 #include "dsi_display.h"
 #include "sde_crtc.h"
 #include "sde_rm.h"
+#include <drm/drm_probe_helper.h>
 
 #define BL_NODE_NAME_SIZE 32
 #define HDR10_PLUS_VSIF_TYPE_CODE      0x81
@@ -2105,23 +2106,26 @@ sde_connector_atomic_best_encoder(struct drm_connector *connector,
 }
 
 static int sde_connector_atomic_check(struct drm_connector *connector,
-		struct drm_connector_state *new_conn_state)
+		struct drm_atomic_state *state)
 {
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	bool qsync_dirty = false, has_modeset = false;
+	struct drm_connector_state *new_conn_state;
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
 		return -EINVAL;
 	}
 
+	c_conn = to_sde_connector(connector);
+	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+
 	if (!new_conn_state) {
 		SDE_ERROR("invalid connector state\n");
 		return -EINVAL;
 	}
 
-	c_conn = to_sde_connector(connector);
 	c_state = to_sde_connector_state(new_conn_state);
 
 	has_modeset = sde_crtc_atomic_check_has_modeset(new_conn_state->state,
@@ -2135,10 +2139,11 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
 		SDE_ERROR("invalid qsync update during modeset\n");
 		return -EINVAL;
 	}
+	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
 
 	if (c_conn->ops.atomic_check)
 		return c_conn->ops.atomic_check(connector,
-				c_conn->display, new_conn_state);
+				c_conn->display, state);
 
 	return 0;
 }

+ 1 - 1
msm/sde/sde_connector.h

@@ -295,7 +295,7 @@ struct sde_connector_ops {
 	 */
 	int (*atomic_check)(struct drm_connector *connector,
 			void *display,
-			struct drm_connector_state *c_state);
+			struct drm_atomic_state *state);
 
 	/**
 	 * pre_destroy - handle pre destroy operations for the connector

+ 4 - 18
msm/sde/sde_crtc.c

@@ -23,9 +23,8 @@
 #include <uapi/drm/sde_drm.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_flip_work.h>
-#include <linux/clk/qcom.h>
 
 #include "sde_kms.h"
 #include "sde_hw_lm.h"
@@ -2794,8 +2793,8 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
 	struct sde_crtc_state *cstate;
 	struct drm_display_mode *mode;
 	struct sde_kms *kms;
-	struct sde_hw_ds *hw_ds;
-	struct sde_hw_ds_cfg *cfg;
+	struct sde_hw_ds *hw_ds = NULL;
+	struct sde_hw_ds_cfg *cfg = NULL;
 	u32 ret = 0;
 	u32 num_ds_enable = 0, hdisplay = 0;
 	u32 max_in_width = 0, max_out_width = 0;
@@ -3774,7 +3773,6 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 	struct sde_crtc_irq_info *node = NULL;
 	int ret = 0;
 	struct drm_event event;
-	struct msm_drm_private *priv;
 
 	if (!crtc) {
 		SDE_ERROR("invalid crtc\n");
@@ -3782,7 +3780,6 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	priv = crtc->dev->dev_private;
 
 	mutex_lock(&sde_crtc->crtc_lock);
 
@@ -3790,12 +3787,6 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 
 	switch (event_type) {
 	case SDE_POWER_EVENT_POST_ENABLE:
-		/* disable mdp LUT memory retention */
-		ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk",
-					CLKFLAG_NORETAIN_MEM);
-		if (ret)
-			SDE_ERROR("disable LUT memory retention err %d\n", ret);
-
 		/* restore encoder; crtc will be programmed during commit */
 		drm_for_each_encoder_mask(encoder, crtc->dev,
 				crtc->state->encoder_mask) {
@@ -3819,11 +3810,6 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg)
 		sde_cp_crtc_post_ipc(crtc);
 		break;
 	case SDE_POWER_EVENT_PRE_DISABLE:
-		/* enable mdp LUT memory retention */
-		ret = sde_power_clk_set_flags(&priv->phandle, "lut_clk",
-					CLKFLAG_RETAIN_MEM);
-		if (ret)
-			SDE_ERROR("enable LUT memory retention err %d\n", ret);
 
 		drm_for_each_encoder_mask(encoder, crtc->dev,
 				crtc->state->encoder_mask) {
@@ -4538,7 +4524,7 @@ static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct sde_kms *kms;
-	struct drm_plane *plane;
+	struct drm_plane *plane = NULL;
 	struct drm_display_mode *mode;
 	int rc = 0, cnt = 0;
 

+ 2 - 46
msm/sde/sde_encoder.c

@@ -19,13 +19,14 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 #include <linux/kthread.h>
 #include <linux/debugfs.h>
+#include <linux/input.h>
 #include <linux/seq_file.h>
 #include <linux/sde_rsc.h>
 
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_intf.h"
@@ -226,7 +227,6 @@ enum sde_enc_rc_states {
  * @recovery_events_enabled:	status of hw recovery feature enable by client
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
- * @pm_qos_cpu_req:		pm_qos request for cpu frequency
  * @mode_info:                  stores the current mode and should be used
  *				 only in commit phase
  */
@@ -292,7 +292,6 @@ struct sde_encoder_virt {
 
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
-	struct pm_qos_request pm_qos_cpu_req;
 	struct msm_mode_info mode_info;
 };
 
@@ -314,44 +313,6 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
 	}
 }
 
-static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	int cpu;
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	cpu_mask = sde_kms->catalog->perf.cpu_mask;
-	cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
-
-	req = &sde_enc->pm_qos_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
-
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
-}
-
-static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
-}
-
 static bool _sde_encoder_is_autorefresh_enabled(
 		struct sde_encoder_virt *sde_enc)
 {
@@ -2165,12 +2126,7 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 		/* enable all the irq */
 		_sde_encoder_irq_control(drm_enc, true);
 
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_add_request(drm_enc, sde_kms);
-
 	} else {
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_remove_request(drm_enc, sde_kms);
 
 		/* disable all the irq */
 		_sde_encoder_irq_control(drm_enc, false);

+ 11 - 4
msm/sde/sde_encoder_phys_cmd.c

@@ -1801,6 +1801,15 @@ static void sde_encoder_phys_cmd_init_ops(struct sde_encoder_phys_ops *ops)
 	ops->collect_misr = sde_encoder_helper_collect_misr;
 }
 
+static inline bool sde_encoder_phys_cmd_intf_te_supported(
+		const struct sde_mdss_cfg *sde_cfg, enum sde_intf idx)
+{
+	if (sde_cfg && ((idx - INTF_0) < sde_cfg->intf_count))
+		return test_bit(SDE_INTF_TE,
+				&(sde_cfg->intf[idx - INTF_0].features));
+	return false;
+}
+
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 		struct sde_enc_phys_init_params *p)
 {
@@ -1841,10 +1850,8 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init(
 	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
 	phys_enc->comp_type = p->comp_type;
 
-	if (sde_hw_intf_te_supported(phys_enc->sde_kms->catalog))
-		phys_enc->has_intf_te = true;
-	else
-		phys_enc->has_intf_te = false;
+	phys_enc->has_intf_te = sde_encoder_phys_cmd_intf_te_supported(
+			phys_enc->sde_kms->catalog, phys_enc->intf_idx);
 
 	for (i = 0; i < INTR_IDX_MAX; i++) {
 		irq = &phys_enc->irq[i];

+ 11 - 10
msm/sde/sde_encoder_phys_wb.c

@@ -703,8 +703,8 @@ static int sde_encoder_phys_wb_atomic_check(
 	const struct drm_display_mode *mode = &crtc_state->mode;
 	int rc;
 
-	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id, mode->name,
+	SDE_DEBUG("[atomic_check:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->name,
 			mode->hdisplay, mode->vdisplay);
 
 	if (!conn_state || !conn_state->connector) {
@@ -962,8 +962,8 @@ static void sde_encoder_phys_wb_setup(
 	struct drm_framebuffer *fb;
 	struct sde_rect *wb_roi = &wb_enc->wb_roi;
 
-	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode.base.id, mode.name,
+	SDE_DEBUG("[mode_set:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode.name,
 			mode.hdisplay, mode.vdisplay);
 
 	memset(wb_roi, 0, sizeof(struct sde_rect));
@@ -1150,9 +1150,9 @@ static void sde_encoder_phys_wb_mode_set(
 	phys_enc->cached_mode = *adj_mode;
 	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
 
-	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id,
-			mode->name, mode->hdisplay, mode->vdisplay);
+	SDE_DEBUG("[mode_set_cache:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->name,
+			mode->hdisplay, mode->vdisplay);
 
 	phys_enc->hw_ctl = NULL;
 	phys_enc->hw_cdm = NULL;
@@ -1401,6 +1401,7 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 	uint32_t size;
 	int nplanes, i, ret;
 	struct msm_gem_address_space *aspace;
+	const struct drm_format_info *info;
 
 	if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
 		SDE_ERROR("invalid params\n");
@@ -1434,7 +1435,8 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 	}
 
 	/* allocate gem tracking object */
-	nplanes = drm_format_num_planes(pixel_format);
+	info = drm_get_format_info(dev, &mode_cmd);
+	nplanes = info->num_planes;
 	if (nplanes >= SDE_MAX_PLANES) {
 		SDE_ERROR("requested format has too many planes\n");
 		return -EINVAL;
@@ -1452,8 +1454,7 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 
 	for (i = 0; i < nplanes; ++i) {
 		wb_enc->bo_disable[i] = wb_enc->bo_disable[0];
-		mode_cmd.pitches[i] = width *
-			drm_format_plane_cpp(pixel_format, i);
+		mode_cmd.pitches[i] = width * info->cpp[i];
 	}
 
 	fb = msm_framebuffer_init(dev, &mode_cmd, wb_enc->bo_disable);

+ 6 - 3
msm/sde/sde_formats.c

@@ -1168,10 +1168,11 @@ int sde_format_check_modified_format(
 		const struct drm_mode_fb_cmd2 *cmd,
 		struct drm_gem_object **bos)
 {
-	int ret, i, num_base_fmt_planes;
+	const struct drm_format_info *info;
 	const struct sde_format *fmt;
 	struct sde_hw_fmt_layout layout;
 	uint32_t bos_total_size = 0;
+	int ret, i;
 
 	if (!msm_fmt || !cmd || !bos) {
 		DRM_ERROR("invalid arguments\n");
@@ -1179,14 +1180,16 @@ int sde_format_check_modified_format(
 	}
 
 	fmt = to_sde_format(msm_fmt);
-	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+	info = drm_format_info(fmt->base.pixel_format);
+	if (!info)
+		return -EINVAL;
 
 	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
 			&layout, cmd->pitches);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < num_base_fmt_planes; i++) {
+	for (i = 0; i < info->num_planes; i++) {
 		if (!bos[i]) {
 			DRM_ERROR("invalid handle for plane %d\n", i);
 			return -EINVAL;

msm/sde/sde_hw_catalog.c  (+178, -26)

@@ -277,6 +277,7 @@ enum {
 	INTF_LEN,
 	INTF_PREFETCH,
 	INTF_TYPE,
+	INTF_TE_IRQ,
 	INTF_PROP_MAX,
 };
 
@@ -709,6 +710,7 @@ static struct sde_prop_type intf_prop[] = {
 	{INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
 						PROP_TYPE_U32_ARRAY},
 	{INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
+	{INTF_TE_IRQ, "qcom,sde-intf-tear-irq-off", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type wb_prop[] = {
@@ -1058,6 +1060,74 @@ end:
 	return rc;
 }
 
+static int _add_to_irq_offset_list(struct sde_mdss_cfg *sde_cfg,
+		enum sde_intr_hwblk_type blk_type, u32 instance, u32 offset)
+{
+	struct sde_intr_irq_offsets *item = NULL;
+	bool err = false;
+
+	switch (blk_type) {
+	case SDE_INTR_HWBLK_TOP:
+		if (instance >= SDE_INTR_TOP_MAX)
+			err = true;
+		break;
+	case SDE_INTR_HWBLK_INTF:
+		if (instance >= INTF_MAX)
+			err = true;
+		break;
+	case SDE_INTR_HWBLK_AD4:
+		if (instance >= AD_MAX)
+			err = true;
+		break;
+	case SDE_INTR_HWBLK_INTF_TEAR:
+		if (instance >= INTF_MAX)
+			err = true;
+		break;
+	case SDE_INTR_HWBLK_LTM:
+		if (instance >= LTM_MAX)
+			err = true;
+		break;
+	default:
+		SDE_ERROR("invalid hwblk_type: %d", blk_type);
+		return -EINVAL;
+	}
+
+	if (err) {
+		SDE_ERROR("unable to map instance %d for blk type %d",
+				instance, blk_type);
+		return -EINVAL;
+	}
+
+	/* Check for existing list entry */
+	item = sde_hw_intr_list_lookup(sde_cfg, blk_type, instance);
+	if (IS_ERR_OR_NULL(item)) {
+		SDE_DEBUG("adding intr type %d idx %d offset 0x%x\n",
+				blk_type, instance, offset);
+	} else if (item->base_offset == offset) {
+		SDE_INFO("duplicate intr %d/%d offset 0x%x, skipping\n",
+				blk_type, instance, offset);
+		return 0;
+	} else {
+		SDE_ERROR("type %d, idx %d in list with offset 0x%x != 0x%x\n",
+				blk_type, instance, item->base_offset, offset);
+		return -EINVAL;
+	}
+
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		SDE_ERROR("memory allocation failed!\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&item->list);
+	item->type = blk_type;
+	item->instance_idx = instance;
+	item->base_offset = offset;
+	list_add_tail(&item->list, &sde_cfg->irq_offset_list);
+
+	return 0;
+}
+
 static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
 	bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
@@ -1210,6 +1280,9 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
 		sblk->llcc_slice_size =
 			sde_cfg->sc_cfg.llcc_slice_size;
 	}
+
+	if (sde_cfg->inline_disable_const_clr)
+		set_bit(SDE_SSPP_INLINE_CONST_CLR, &sspp->features);
 }
 
 static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
@@ -1850,6 +1923,8 @@ static int sde_mixer_parse_dt(struct device_node *np,
 			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
 		if (sde_cfg->has_dim_layer)
 			set_bit(SDE_DIM_LAYER, &mixer->features);
+		if (sde_cfg->has_mixer_combined_alpha)
+			set_bit(SDE_MIXER_COMBINED_ALPHA, &mixer->features);
 
 		of_property_read_string_index(np,
 			mixer_prop[MIXER_DISP].prop_name, i, &disp_pref);
@@ -1941,6 +2016,11 @@ static int sde_intf_parse_dt(struct device_node *np,
 		if (!prop_exists[INTF_LEN])
 			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
 
+		rc = _add_to_irq_offset_list(sde_cfg, SDE_INTR_HWBLK_INTF,
+				intf->id, intf->base);
+		if (rc)
+			goto end;
+
 		intf->prog_fetch_lines_worst_case =
 				!prop_exists[INTF_PREFETCH] ?
 				sde_cfg->perf.min_prefill_lines :
@@ -1968,11 +2048,19 @@ static int sde_intf_parse_dt(struct device_node *np,
 		if (IS_SDE_CTL_REV_100(sde_cfg->ctl_rev))
 			set_bit(SDE_INTF_INPUT_CTRL, &intf->features);
 
-		if (IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
-				SDE_HW_VER_500) ||
-				IS_SDE_MAJOR_SAME((sde_cfg->hwversion),
-				SDE_HW_VER_600))
+		if (prop_exists[INTF_TE_IRQ])
+			intf->te_irq_offset = PROP_VALUE_ACCESS(prop_value,
+					INTF_TE_IRQ, i);
+
+		if (intf->te_irq_offset) {
+			rc = _add_to_irq_offset_list(sde_cfg,
+					SDE_INTR_HWBLK_INTF_TEAR,
+					intf->id, intf->te_irq_offset);
+			if (rc)
+				goto end;
+
 			set_bit(SDE_INTF_TE, &intf->features);
+		}
 	}
 
 end:
@@ -2437,6 +2525,11 @@ static int sde_dspp_parse_dt(struct device_node *np,
 			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
 				AD_VERSION, 0);
 			set_bit(SDE_DSPP_AD, &dspp->features);
+			rc = _add_to_irq_offset_list(sde_cfg,
+					SDE_INTR_HWBLK_AD4, dspp->id,
+					dspp->base + sblk->ad.base);
+			if (rc)
+				goto end;
 		}
 
 		sblk->ltm.id = SDE_DSPP_LTM;
@@ -2448,6 +2541,11 @@ static int sde_dspp_parse_dt(struct device_node *np,
 			sblk->ltm.version = PROP_VALUE_ACCESS(ltm_prop_value,
 				LTM_VERSION, 0);
 			set_bit(SDE_DSPP_LTM, &dspp->features);
+			rc = _add_to_irq_offset_list(sde_cfg,
+					SDE_INTR_HWBLK_LTM, dspp->id,
+					dspp->base + sblk->ltm.base);
+			if (rc)
+				goto end;
 		}
 
 	}
@@ -2902,6 +3000,8 @@ static int _sde_vbif_populate(struct sde_mdss_cfg *sde_cfg,
 	for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
 		vbif->memtype[k++] = PROP_VALUE_ACCESS(
 				prop_value, VBIF_MEMTYPE_1, j);
+	if (sde_cfg->vbif_disable_inner_outer_shareable)
+		set_bit(SDE_VBIF_DISABLE_SHAREABLE, &vbif->features);
 
 	return 0;
 }
@@ -2913,7 +3013,7 @@ static int sde_vbif_parse_dt(struct device_node *np,
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[VBIF_PROP_MAX];
 	u32 off_count, vbif_len;
-	struct sde_vbif_cfg *vbif;
+	struct sde_vbif_cfg *vbif = NULL;
 
 	if (!sde_cfg) {
 		SDE_ERROR("invalid argument\n");
@@ -3363,6 +3463,21 @@ static int sde_top_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
 	if (major_version < SDE_HW_MAJOR(SDE_HW_VER_500))
 		set_bit(SDE_MDP_VSYNC_SEL, &cfg->mdp[0].features);
 
+	rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
+			SDE_INTR_TOP_INTR, cfg->mdp[0].base);
+	if (rc)
+		goto end;
+
+	rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
+			SDE_INTR_TOP_INTR2, cfg->mdp[0].base);
+	if (rc)
+		goto end;
+
+	rc = _add_to_irq_offset_list(cfg, SDE_INTR_HWBLK_TOP,
+			SDE_INTR_TOP_HIST_INTR, cfg->mdp[0].base);
+	if (rc)
+		goto end;
+
 	if (prop_exists[SEC_SID_MASK]) {
 		cfg->sec_sid_mask_count = prop_count[SEC_SID_MASK];
 		for (i = 0; i < cfg->sec_sid_mask_count; i++)
@@ -4045,29 +4160,28 @@ static void _sde_hw_setup_uidle(struct sde_uidle_cfg *uidle_cfg)
 
 static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 {
-	int i, rc = 0;
+	int rc = 0;
 
 	if (!sde_cfg)
 		return -EINVAL;
 
-	for (i = 0; i < MDSS_INTR_MAX; i++)
-		set_bit(i, sde_cfg->mdss_irqs);
+	/* default settings for *MOST* targets */
+	sde_cfg->has_mixer_combined_alpha = true;
 
+	/* target specific settings */
 	if (IS_MSM8996_TARGET(hw_rev)) {
 		sde_cfg->perf.min_prefill_lines = 21;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
+		sde_cfg->has_mixer_combined_alpha = false;
 	} else if (IS_MSM8998_TARGET(hw_rev)) {
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 25;
 		sde_cfg->vbif_qos_nlvl = 4;
 		sde_cfg->ts_prefill_rev = 1;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
 		sde_cfg->has_cursor = true;
 		sde_cfg->has_hdr = true;
+		sde_cfg->has_mixer_combined_alpha = false;
 	} else if (IS_SDM845_TARGET(hw_rev)) {
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->has_cwb_support = true;
@@ -4076,8 +4190,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x3F71;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_vig_p010 = true;
@@ -4086,8 +4198,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->perf.min_prefill_lines = 24;
 		sde_cfg->vbif_qos_nlvl = 8;
 		sde_cfg->ts_prefill_rev = 2;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_vig_p010 = true;
@@ -4110,9 +4220,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
 	} else if (IS_SDMSHRIKE_TARGET(hw_rev)) {
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 24;
@@ -4120,8 +4229,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->ts_prefill_rev = 2;
 		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
 		sde_cfg->delay_prg_fetch_start = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_decimation = true;
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_vig_p010 = true;
@@ -4140,10 +4247,9 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_vig_p010 = true;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
 	} else if (IS_SDMMAGPIE_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_wb_ubwc = true;
@@ -4159,6 +4265,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
 	} else if (IS_KONA_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_wb_ubwc = true;
@@ -4174,8 +4281,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_hdr_plus = true;
 		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
@@ -4191,6 +4296,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->true_inline_prefill_lines_nv12 = 32;
 		sde_cfg->true_inline_prefill_lines = 48;
 		sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_0;
+		sde_cfg->inline_disable_const_clr = true;
 	} else if (IS_SAIPAN_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_wb_ubwc = true;
@@ -4206,8 +4312,6 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->has_qos_fl_nocalc = true;
 		sde_cfg->has_3d_merge_reset = true;
-		clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
-		clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
 		sde_cfg->has_hdr = true;
 		sde_cfg->has_hdr_plus = true;
 		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
@@ -4222,6 +4326,7 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->true_inline_prefill_fudge_lines = 2;
 		sde_cfg->true_inline_prefill_lines_nv12 = 32;
 		sde_cfg->true_inline_prefill_lines = 48;
+		sde_cfg->inline_disable_const_clr = true;
 	} else if (IS_SDMTRINKET_TARGET(hw_rev)) {
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_qsync = true;
@@ -4235,6 +4340,50 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->sui_block_xin_mask = 0xC61;
 		sde_cfg->has_hdr = false;
 		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
+	} else if (IS_BENGAL_TARGET(hw_rev)) {
+		sde_cfg->has_cwb_support = false;
+		sde_cfg->has_qsync = true;
+		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
+		sde_cfg->delay_prg_fetch_start = true;
+		sde_cfg->sui_ns_allowed = true;
+		sde_cfg->sui_misr_supported = true;
+		sde_cfg->sui_block_xin_mask = 0xC01;
+		sde_cfg->has_hdr = false;
+		sde_cfg->has_sui_blendstage = true;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
+	} else if (IS_LAHAINA_TARGET(hw_rev)) {
+		sde_cfg->has_cwb_support = true;
+		sde_cfg->has_wb_ubwc = true;
+		sde_cfg->has_qsync = true;
+		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
+		sde_cfg->delay_prg_fetch_start = true;
+		sde_cfg->sui_ns_allowed = true;
+		sde_cfg->sui_misr_supported = true;
+		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_3d_merge_reset = true;
+		sde_cfg->has_hdr = true;
+		sde_cfg->has_hdr_plus = true;
+		set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
+		sde_cfg->has_vig_p010 = true;
+		sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
+		sde_cfg->true_inline_dwnscale_rt_num =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
+		sde_cfg->true_inline_dwnscale_rt_denom =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
+		sde_cfg->true_inline_dwnscale_nrt =
+			MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
+		sde_cfg->true_inline_prefill_fudge_lines = 2;
+		sde_cfg->true_inline_prefill_lines_nv12 = 32;
+		sde_cfg->true_inline_prefill_lines = 48;
+		sde_cfg->uidle_cfg.uidle_rev = SDE_UIDLE_VERSION_1_0_0;
+		sde_cfg->vbif_disable_inner_outer_shareable = true;
 	} else {
 		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -4315,6 +4464,8 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 	if (!sde_cfg)
 		return;
 
+	sde_hw_catalog_irq_offset_list_delete(&sde_cfg->irq_offset_list);
+
 	for (i = 0; i < sde_cfg->sspp_count; i++)
 		kfree(sde_cfg->sspp[i].sblk);
 
@@ -4375,6 +4526,7 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
 		return ERR_PTR(-ENOMEM);
 
 	sde_cfg->hwversion = hw_rev;
+	INIT_LIST_HEAD(&sde_cfg->irq_offset_list);
 
 	rc = _sde_hardware_pre_caps(sde_cfg, hw_rev);
 	if (rc)
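
The catalog now records each target's IRQ register blocks in a standard kernel linked list (irq_offset_list), populated from device tree data during parsing and freed again in sde_hw_catalog_deinit(). A self-contained sketch of that add/walk/free lifecycle, with hypothetical node and field names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_irq_node {
	struct list_head list;
	u32 instance_idx;
	u32 base_offset;
};

/* append one entry; the caller owns an already initialized list head */
static int example_add(struct list_head *head, u32 idx, u32 offset)
{
	struct example_irq_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	node->instance_idx = idx;
	node->base_offset = offset;
	list_add_tail(&node->list, head);	/* keep parse order */

	return 0;
}

/* the _safe iterator allows unlinking entries while walking the list */
static void example_free_all(struct list_head *head)
{
	struct example_irq_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, head, list) {
		list_del(&node->list);
		kfree(node);
	}
}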

msm/sde/sde_hw_catalog.h  (+75, -48)

@@ -10,7 +10,6 @@
 #include <linux/bug.h>
 #include <linux/bitmap.h>
 #include <linux/err.h>
-#include <linux/msm-bus.h>
 #include <linux/of_fdt.h>
 #include <drm/drmP.h>
 #include "sde_hw_mdss.h"
@@ -31,28 +30,25 @@
 #define SDE_HW_STEP(rev)		((rev) & 0xFFFF)
 #define SDE_HW_MAJOR_MINOR(rev)		((rev) >> 16)
 
-#define IS_SDE_MAJOR_SAME(rev1, rev2)   \
-	(SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2)))
-
-#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2)   \
-	(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
-
-#define SDE_HW_VER_170	SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
-#define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
-#define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
-#define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
-#define SDE_HW_VER_301	SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */
-#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
-#define SDE_HW_VER_401	SDE_HW_VER(4, 0, 1) /* sdm845 v2.0 */
-#define SDE_HW_VER_410	SDE_HW_VER(4, 1, 0) /* sdm670 v1.0 */
-#define SDE_HW_VER_500	SDE_HW_VER(5, 0, 0) /* sm8150 v1.0 */
-#define SDE_HW_VER_501	SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */
-#define SDE_HW_VER_510	SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
-#define SDE_HW_VER_520	SDE_HW_VER(5, 2, 0) /* sdmmagpie v1.0 */
-#define SDE_HW_VER_530	SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
-#define SDE_HW_VER_540	SDE_HW_VER(5, 4, 0) /* sdmtrinket v1.0 */
+#define SDE_HW_VER_170	SDE_HW_VER(1, 7, 0) /* 8996 */
+#define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* 8998 */
+#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* sdm845 */
+#define SDE_HW_VER_410	SDE_HW_VER(4, 1, 0) /* sdm670 */
+#define SDE_HW_VER_500	SDE_HW_VER(5, 0, 0) /* sm8150 */
+#define SDE_HW_VER_510	SDE_HW_VER(5, 1, 0) /* sdmshrike */
+#define SDE_HW_VER_520	SDE_HW_VER(5, 2, 0) /* sdmmagpie */
+#define SDE_HW_VER_530	SDE_HW_VER(5, 3, 0) /* sm6150 */
+#define SDE_HW_VER_540	SDE_HW_VER(5, 4, 0) /* sdmtrinket */
 #define SDE_HW_VER_600	SDE_HW_VER(6, 0, 0) /* kona */
 #define SDE_HW_VER_610	SDE_HW_VER(6, 1, 0) /* sm7250 */
+#define SDE_HW_VER_630	SDE_HW_VER(6, 3, 0) /* bengal */
+#define SDE_HW_VER_700	SDE_HW_VER(7, 0, 0) /* lahaina */
+
+/* Avoid using below IS_XXX macros outside catalog, use feature bit instead */
+#define IS_SDE_MAJOR_SAME(rev1, rev2)   \
+		(SDE_HW_MAJOR((rev1)) == SDE_HW_MAJOR((rev2)))
+#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2)   \
+		(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
 
 #define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170)
 #define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300)
@@ -65,6 +61,8 @@
 #define IS_SDMTRINKET_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_540)
 #define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
 #define IS_SAIPAN_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_610)
+#define IS_BENGAL_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_630)
+#define IS_LAHAINA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_700)
 
 #define SDE_HW_BLK_NAME_LEN	16
 
@@ -137,26 +135,30 @@ enum {
 #define SSPP_SYS_CACHE_NO_ALLOC	BIT(4)
 
 /**
- * SDE INTERRUPTS - maintains the possible hw irq's allowed by HW
- * The order in this enum must match the order of the irqs defined
- * by 'sde_irq_map'
- */
-enum sde_intr_enum {
-	MDSS_INTR_SSPP_TOP0_INTR,
-	MDSS_INTR_SSPP_TOP0_INTR2,
-	MDSS_INTF_TEAR_1_INTR,
-	MDSS_INTF_TEAR_2_INTR,
-	MDSS_INTR_SSPP_TOP0_HIST_INTR,
-	MDSS_INTR_INTF_0_INTR,
-	MDSS_INTR_INTF_1_INTR,
-	MDSS_INTR_INTF_2_INTR,
-	MDSS_INTR_INTF_3_INTR,
-	MDSS_INTR_INTF_4_INTR,
-	MDSS_INTR_AD4_0_INTR,
-	MDSS_INTR_AD4_1_INTR,
-	MDSS_INTR_LTM_0_INTR,
-	MDSS_INTR_LTM_1_INTR,
-	MDSS_INTR_MAX
+ * All INTRs relevant for a specific target should be enabled via
+ * _add_to_irq_offset_list()
+ */
+enum sde_intr_hwblk_type {
+	SDE_INTR_HWBLK_TOP,
+	SDE_INTR_HWBLK_INTF,
+	SDE_INTR_HWBLK_AD4,
+	SDE_INTR_HWBLK_INTF_TEAR,
+	SDE_INTR_HWBLK_LTM,
+	SDE_INTR_HWBLK_MAX
+};
+
+enum sde_intr_top_intr {
+	SDE_INTR_TOP_INTR = 1,
+	SDE_INTR_TOP_INTR2,
+	SDE_INTR_TOP_HIST_INTR,
+	SDE_INTR_TOP_MAX
+};
+
+struct sde_intr_irq_offsets {
+	struct list_head list;
+	enum sde_intr_hwblk_type type;
+	u32 instance_idx;
+	u32 base_offset;
 };
 
 /**
@@ -210,6 +212,7 @@ enum {
  * @SDE_SSPP_BLOCK_SEC_UI    Blocks secure-ui layers
  * @SDE_SSPP_SCALER_QSEED3LITE Qseed3lite algorithm support
  * @SDE_SSPP_TRUE_INLINE_ROT_V1, Support of SSPP true inline rotation v1
+ * @SDE_SSPP_INLINE_CONST_CLR Inline rotation requires const clr disabled
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -238,6 +241,7 @@ enum {
 	SDE_SSPP_BLOCK_SEC_UI,
 	SDE_SSPP_SCALER_QSEED3LITE,
 	SDE_SSPP_TRUE_INLINE_ROT_V1,
+	SDE_SSPP_INLINE_CONST_CLR,
 	SDE_SSPP_MAX
 };
 
@@ -274,6 +278,7 @@ enum {
  * @SDE_DISP_CWB_PREF         Layer mixer preferred for CWB
  * @SDE_DISP_PRIMARY_PREF     Layer mixer preferred for primary display
  * @SDE_DISP_SECONDARY_PREF   Layer mixer preferred for secondary display
+ * @SDE_MIXER_COMBINED_ALPHA  Layer mixer bg and fg alpha in single register
  * @SDE_MIXER_MAX             maximum value
  */
 enum {
@@ -284,6 +289,7 @@ enum {
 	SDE_DISP_PRIMARY_PREF,
 	SDE_DISP_SECONDARY_PREF,
 	SDE_DISP_CWB_PREF,
+	SDE_MIXER_COMBINED_ALPHA,
 	SDE_MIXER_MAX
 };
 
@@ -458,11 +464,13 @@ enum {
  * VBIF sub-blocks and features
  * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
  * @SDE_VBIF_QOS_REMAP        VBIF supports QoS priority remap
+ * @SDE_VBIF_DISABLE_SHAREABLE: VBIF requires inner/outer shareables disabled
  * @SDE_VBIF_MAX              maximum value
  */
 enum {
 	SDE_VBIF_QOS_OTLIM = 0x1,
 	SDE_VBIF_QOS_REMAP,
+	SDE_VBIF_DISABLE_SHAREABLE,
 	SDE_VBIF_MAX
 };
 
@@ -946,12 +954,14 @@ struct sde_cdm_cfg   {
  * @type:              Interface type(DSI, DP, HDMI)
  * @controller_id:     Controller Instance ID in case of multiple of intf type
  * @prog_fetch_lines_worst_case	Worst case latency num lines needed to prefetch
+ * @te_irq_offset:     Register offset for INTF TE IRQ block
  */
 struct sde_intf_cfg  {
 	SDE_HW_BLK_INFO;
 	u32 type;   /* interface type*/
 	u32 controller_id;
 	u32 prog_fetch_lines_worst_case;
+	u32 te_irq_offset;
 };
 
 /**
@@ -1271,6 +1281,9 @@ struct sde_limit_cfg {
  * @has_3d_merge_reset Supports 3D merge reset
  * @has_decimation     Supports decimation
  * @has_qos_fl_nocalc  flag to indicate QoS fill level needs no calculation
+ * @has_mixer_combined_alpha     Mixer has single register for FG & BG alpha
+ * @vbif_disable_inner_outer_shareable     VBIF requires disabling shareables
+ * @inline_disable_const_clr     Disable constant color during inline rotate
  * @sc_cfg: system cache configuration
  * @uidle_cfg		Settings for uidle feature
  * @sui_misr_supported  indicate if secure-ui-misr is supported
@@ -1286,7 +1299,7 @@ struct sde_limit_cfg {
  * @has_cursor    indicates if hardware cursor is supported
  * @has_vig_p010  indicates if vig pipe supports p010 format
  * @inline_rot_formats	formats supported by the inline rotator feature
- * @mdss_irqs	  bitmap with the irqs supported by the target
+ * @irq_offset_list     list of sde_intr_irq_offsets to initialize irq table
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -1331,6 +1344,9 @@ struct sde_mdss_cfg {
 	bool has_3d_merge_reset;
 	bool has_decimation;
 	bool has_qos_fl_nocalc;
+	bool has_mixer_combined_alpha;
+	bool vbif_disable_inner_outer_shareable;
+	bool inline_disable_const_clr;
 
 	struct sde_sc_cfg sc_cfg;
 
@@ -1416,7 +1432,7 @@ struct sde_mdss_cfg {
 	struct sde_format_extended *virt_vig_formats;
 	struct sde_format_extended *inline_rot_formats;
 
-	DECLARE_BITMAP(mdss_irqs, MDSS_INTR_MAX);
+	struct list_head irq_offset_list;
 };
 
 struct sde_mdss_hw_cfg_handler {
@@ -1470,6 +1486,22 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
  */
 void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
 
+/**
+ * sde_hw_catalog_irq_offset_list_delete - delete the irq_offset_list
+ *                                         maintained by the catalog
+ * @head:      pointer to the catalog's irq_offset_list
+ */
+static inline void sde_hw_catalog_irq_offset_list_delete(
+		struct list_head *head)
+{
+	struct sde_intr_irq_offsets *item, *tmp;
+
+	list_for_each_entry_safe(item, tmp, head, list) {
+		list_del(&item->list);
+		kfree(item);
+	}
+}
+
 /**
  * sde_hw_sspp_multirect_enabled - check multirect enabled for the sspp
  * @cfg:          pointer to sspp cfg
@@ -1480,9 +1512,4 @@ static inline bool sde_hw_sspp_multirect_enabled(const struct sde_sspp_cfg *cfg)
 			 test_bit(SDE_SSPP_SMART_DMA_V2, &cfg->features) ||
 			 test_bit(SDE_SSPP_SMART_DMA_V2p5, &cfg->features);
 }
-
-static inline bool sde_hw_intf_te_supported(const struct sde_mdss_cfg *sde_cfg)
-{
-	return test_bit(SDE_INTF_TE, &(sde_cfg->intf[0].features));
-}
 #endif /* _SDE_HW_CATALOG_H */
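
As the new comment above notes, the IS_XXX macros should stay inside the catalog and feature bits should be used elsewhere. For reference, the revision packing visible above keeps the stepping in the low 16 bits (SDE_HW_STEP) and major/minor in the upper 16 (SDE_HW_MAJOR_MINOR), so two steppings of the same silicon compare equal. A one-line sketch of what IS_SDE_MAJOR_MINOR_SAME() effectively does; the helper name is illustrative:

/* Hypothetical helper: true when rev1 and rev2 differ only in stepping. */
static inline bool example_same_target(u32 rev1, u32 rev2)
{
	return (rev1 >> 16) == (rev2 >> 16);
}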

msm/sde/sde_hw_ctl.c  (+19, -2)

@@ -36,6 +36,7 @@
 #define   CTL_CWB_ACTIVE                0x0F0
 #define   CTL_INTF_ACTIVE               0x0F4
 #define   CTL_CDM_ACTIVE                0x0F8
+#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
 
 #define   CTL_MERGE_3D_FLUSH           0x100
 #define   CTL_DSC_FLUSH                0x104
@@ -61,6 +62,8 @@
 #define UPDATE_MASK(m, idx, en)           \
 	((m) = (en) ? ((m) | BIT((idx))) : ((m) & ~BIT((idx))))
 
+#define CTL_INVALID_BIT                0xffff
+
 /**
  * List of SSPP bits in CTL_FLUSH
  */
@@ -110,6 +113,13 @@ static const u32 intf_tbl[INTF_MAX] = {SDE_NONE, 31, 30, 29, 28};
  * top level control.
  */
 
+/**
+ * List of SSPP bits in CTL_FETCH_PIPE_ACTIVE
+ */
+static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
+	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
+	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
+
 /**
  * list of WB bits in CTL_WB_FLUSH
  */
@@ -764,6 +774,7 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
 		SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
 		SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
 	}
+	SDE_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
 }
 
 static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
@@ -772,6 +783,7 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
 	struct sde_hw_blk_reg_map *c;
 	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
 	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+	u32 active_fetch_pipes = 0;
 	int i, j;
 	u8 stages;
 	int pipes_per_stage;
@@ -801,10 +813,11 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
 		ext = i >= 7;
 
 		for (j = 0 ; j < pipes_per_stage; j++) {
+			enum sde_sspp pipe = stage_cfg->stage[i][j];
 			enum sde_sspp_multirect_index rect_index =
 				stage_cfg->multirect_index[i][j];
 
-			switch (stage_cfg->stage[i][j]) {
+			switch (pipe) {
 			case SSPP_VIG0:
 				if (rect_index == SDE_SSPP_RECT_1) {
 					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
@@ -894,6 +907,9 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
 			default:
 				break;
 			}
+
+			if (fetch_tbl[pipe] != CTL_INVALID_BIT)
+				active_fetch_pipes |= BIT(fetch_tbl[pipe]);
 		}
 	}
 
@@ -902,6 +918,7 @@ exit:
 	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+	SDE_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, active_fetch_pipes);
 }
 
 static u32 sde_hw_ctl_get_staged_sspp(struct sde_hw_ctl *ctx, enum sde_lm lm,
@@ -958,7 +975,7 @@ static int sde_hw_ctl_intf_cfg_v1(struct sde_hw_ctl *ctx,
 	u32 wb_active = 0;
 	u32 merge_3d_active = 0;
 	u32 cwb_active = 0;
-	u32 mode_sel = 0;
+	u32 mode_sel = 0xf0000000;
 	u32 cdm_active = 0;
 	u32 intf_master = 0;
 	u32 i;
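
The CTL hunk above adds a per-pipe fetch-active mask: every SSPP staged on a mixer maps through fetch_tbl[] to a bit in CTL_FETCH_PIPE_ACTIVE, and pipes with no mapping are skipped. A compact sketch of the mask construction with a hypothetical pipe-id table; only the overall shape mirrors the driver:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_INVALID_BIT	0xffff

/* indexed by pipe id; value is the bit position in the fetch-active
 * register, or invalid when the pipe has no fetch-active bit
 */
static const u32 example_fetch_tbl[] = {
	EXAMPLE_INVALID_BIT,	/* pipe 0 */
	16,			/* pipe 1 */
	17,			/* pipe 2 */
	0,			/* pipe 3 */
};

/* staged[] holds pipe ids already validated against the table size */
static u32 example_fetch_mask(const u32 *staged, int count)
{
	u32 mask = 0;
	int i;

	for (i = 0; i < count; i++)
		if (example_fetch_tbl[staged[i]] != EXAMPLE_INVALID_BIT)
			mask |= BIT(example_fetch_tbl[staged[i]]);

	return mask;
}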

msm/sde/sde_hw_interrupts.c  (+224, -448)

@@ -13,27 +13,17 @@
 
 /**
  * Register offsets in MDSS register file for the interrupt registers
- * w.r.t. to the MDSS base
+ * w.r.t. base for that block. Base offsets for IRQs should come from the
+ * device tree and get stored in the catalog(irq_offset_list) until they
+ * are added to the sde_irq_tbl during the table initialization.
  */
 #define HW_INTR_STATUS			0x0010
-#define MDP_SSPP_TOP0_OFF		0x1000
-#define MDP_INTF_0_OFF			0x6B000
-#define MDP_INTF_1_OFF			0x6B800
-#define MDP_INTF_2_OFF			0x6C000
-#define MDP_INTF_3_OFF			0x6C800
-#define MDP_INTF_4_OFF			0x6D000
-#define MDP_AD4_0_OFF			0x7D000
-#define MDP_AD4_1_OFF			0x7E000
 #define MDP_AD4_INTR_EN_OFF		0x41c
 #define MDP_AD4_INTR_CLEAR_OFF		0x424
 #define MDP_AD4_INTR_STATUS_OFF		0x420
-#define MDP_INTF_TEAR_INTF_1_IRQ_OFF	0x6E800
-#define MDP_INTF_TEAR_INTF_2_IRQ_OFF	0x6E900
 #define MDP_INTF_TEAR_INTR_EN_OFF	0x0
-#define MDP_INTF_TEAR_INTR_STATUS_OFF   0x4
-#define MDP_INTF_TEAR_INTR_CLEAR_OFF    0x8
-#define MDP_LTM_0_OFF			0x7F000
-#define MDP_LTM_1_OFF			0x7F100
+#define MDP_INTF_TEAR_INTR_STATUS_OFF	0x4
+#define MDP_INTF_TEAR_INTR_CLEAR_OFF	0x8
 #define MDP_LTM_INTR_EN_OFF		0x50
 #define MDP_LTM_INTR_STATUS_OFF		0x54
 #define MDP_LTM_INTR_CLEAR_OFF		0x58
@@ -206,8 +196,6 @@
  * @clr_off:	offset to CLEAR reg
  * @en_off:	offset to ENABLE reg
  * @status_off:	offset to STATUS reg
- * @sde_irq_idx;	global index in the 'sde_irq_map' table,
- *		to know which interrupt type, instance, mask, etc. to use
  * @map_idx_start   first offset in the sde_irq_map table
  * @map_idx_end    last offset in the sde_irq_map table
  */
@@ -215,7 +203,6 @@ struct sde_intr_reg {
 	u32 clr_off;
 	u32 en_off;
 	u32 status_off;
-	int sde_irq_idx;
 	u32 map_idx_start;
 	u32 map_idx_end;
 };
@@ -226,7 +213,7 @@ struct sde_intr_reg {
  * @instance_idx:	instance index of the associated HW block in SDE
  * @irq_mask:		corresponding bit in the interrupt status reg
  * @reg_idx:		index in the 'sde_irq_tbl' table, to know which
- *		registers offsets to use. -1 = invalid offset
+ *			registers offsets to use.
  */
 struct sde_irq_type {
 	u32 intr_type;
@@ -240,11 +227,13 @@ struct sde_irq_type {
  *                     a matching interface type and instance index.
  * Each of these tables are copied to a dynamically allocated
  * table, that will be used to service each of the irqs
+ * -1 indicates an uninitialized value which should be set when copying
+ * these tables to the sde_irq_map.
  */
 static struct sde_irq_type sde_irq_intr_map[] = {
 
 	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
-	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
+	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, -1},
 	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
 	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
 
@@ -304,7 +293,6 @@ static struct sde_irq_type sde_irq_intr2_map[] = {
 
 	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
 		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
-
 	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
 		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
 
@@ -363,15 +351,12 @@ static struct sde_irq_type sde_irq_intr2_map[] = {
 };
 
 static struct sde_irq_type sde_irq_hist_map[] = {
-
 	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
 		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
-
 	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
 		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
-
 	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
 		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
@@ -382,11 +367,9 @@ static struct sde_irq_type sde_irq_hist_map[] = {
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
 		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
-
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
 		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
-
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
 		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
@@ -395,167 +378,46 @@ static struct sde_irq_type sde_irq_hist_map[] = {
 		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
 };
 
-static struct sde_irq_type sde_irq_intf0_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_inf1_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_intf2_map[] = {
-
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
-};
-
-static struct sde_irq_type sde_irq_intf3_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+static struct sde_irq_type sde_irq_intf_map[] = {
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, -1,
 		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, -1,
 		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, -1,
 		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, -1,
 		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
 
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, -1,
 		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, -1,
 		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, -1,
 		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, -1,
 		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
 
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
+	{ SDE_IRQ_TYPE_PROG_LINE, -1, SDE_INTR_PROG_LINE, -1},
 };
 
-static struct sde_irq_type sde_irq_inf4_map[] = {
-
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
-		SDE_INTR_VIDEO_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
-		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
-		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
-		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
-		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
-
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
+static struct sde_irq_type sde_irq_ad4_map[] = {
+	{ SDE_IRQ_TYPE_AD4_BL_DONE, -1, SDE_INTR_BACKLIGHT_UPDATED, -1},
 };
 
-static struct sde_irq_type sde_irq_ad4_0_map[] = {
-
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
-};
-
-static struct sde_irq_type sde_irq_ad4_1_map[] = {
-
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
-};
-
-static struct sde_irq_type sde_irq_intf1_te_map[] = {
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
-		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_RD_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
-		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
-};
-
-static struct sde_irq_type sde_irq_intf2_te_map[] = {
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
+static struct sde_irq_type sde_irq_intf_te_map[] = {
+	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, -1,
 		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
+	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, -1,
 		SDE_INTR_INTF_TEAR_WR_PTR, -1},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
+	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, -1,
 		SDE_INTR_INTF_TEAR_RD_PTR, -1},
-
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
+	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, -1,
 		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
 };
 
-static struct sde_irq_type sde_irq_ltm_0_map[] = {
-
-	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_0, SDE_INTR_LTM_STATS_DONE, -1},
-	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_0, SDE_INTR_LTM_STATS_WB_PB, -1},
-};
-
-static struct sde_irq_type sde_irq_ltm_1_map[] = {
-
-	{ SDE_IRQ_TYPE_LTM_STATS_DONE, DSPP_1, SDE_INTR_LTM_STATS_DONE, -1},
-	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, DSPP_1, SDE_INTR_LTM_STATS_WB_PB, -1},
+static struct sde_irq_type sde_irq_ltm_map[] = {
+	{ SDE_IRQ_TYPE_LTM_STATS_DONE, -1, SDE_INTR_LTM_STATS_DONE, -1},
+	{ SDE_IRQ_TYPE_LTM_STATS_WB_PB, -1, SDE_INTR_LTM_STATS_WB_PB, -1},
 };
 
 static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
@@ -596,7 +458,6 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
 	int end_idx;
 	u32 irq_status;
 	unsigned long irq_flags;
-	int sde_irq_idx;
 
 	if (!intr)
 		return;
@@ -610,11 +471,6 @@ static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
 	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
 		irq_status = intr->save_irq_status[reg_idx];
 
-		/* get the global offset in 'sde_irq_map' */
-		sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
-		if (sde_irq_idx < 0)
-			continue;
-
 		/*
 		 * Each Interrupt register has dynamic range of indexes,
 		 * initialized during hw_intr_init when sde_irq_tbl is created.
@@ -1017,6 +873,83 @@ static u32 sde_hw_intr_get_intr_status_nomask(struct sde_hw_intr *intr,
 	return intr_status;
 }
 
+static int _set_sde_irq_tbl_offset_top(struct sde_intr_reg *sde_irq,
+		struct sde_intr_irq_offsets *item)
+{
+	u32 base_offset;
+
+	if (!sde_irq || !item)
+		return -EINVAL;
+
+	base_offset = item->base_offset;
+	switch (item->instance_idx) {
+	case SDE_INTR_TOP_INTR:
+		sde_irq->clr_off = base_offset + INTR_CLEAR;
+		sde_irq->en_off = base_offset + INTR_EN;
+		sde_irq->status_off = base_offset + INTR_STATUS;
+		break;
+	case SDE_INTR_TOP_INTR2:
+		sde_irq->clr_off = base_offset + INTR2_CLEAR;
+		sde_irq->en_off = base_offset + INTR2_EN;
+		sde_irq->status_off = base_offset + INTR2_STATUS;
+		break;
+	case SDE_INTR_TOP_HIST_INTR:
+		sde_irq->clr_off = base_offset + HIST_INTR_CLEAR;
+		sde_irq->en_off = base_offset + HIST_INTR_EN;
+		sde_irq->status_off = base_offset + HIST_INTR_STATUS;
+		break;
+	default:
+		pr_err("invalid TOP intr for instance %d\n",
+				item->instance_idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _set_sde_irq_tbl_offset(struct sde_intr_reg *sde_irq,
+		struct sde_intr_irq_offsets *item)
+{
+	u32 base_offset, rc = 0;
+
+	if (!sde_irq || !item)
+		return -EINVAL;
+
+	base_offset = item->base_offset;
+	switch (item->type) {
+	case SDE_INTR_HWBLK_TOP:
+		rc = _set_sde_irq_tbl_offset_top(sde_irq, item);
+		break;
+	case SDE_INTR_HWBLK_INTF:
+		sde_irq->clr_off = base_offset + INTF_INTR_CLEAR;
+		sde_irq->en_off = base_offset + INTF_INTR_EN;
+		sde_irq->status_off = base_offset + INTF_INTR_STATUS;
+		break;
+	case SDE_INTR_HWBLK_AD4:
+		sde_irq->clr_off = base_offset + MDP_AD4_INTR_CLEAR_OFF;
+		sde_irq->en_off = base_offset + MDP_AD4_INTR_EN_OFF;
+		sde_irq->status_off = base_offset + MDP_AD4_INTR_STATUS_OFF;
+		break;
+	case SDE_INTR_HWBLK_INTF_TEAR:
+		sde_irq->clr_off = base_offset + MDP_INTF_TEAR_INTR_CLEAR_OFF;
+		sde_irq->en_off = base_offset + MDP_INTF_TEAR_INTR_EN_OFF;
+		sde_irq->status_off = base_offset +
+				MDP_INTF_TEAR_INTR_STATUS_OFF;
+		break;
+	case SDE_INTR_HWBLK_LTM:
+		sde_irq->clr_off = base_offset + MDP_LTM_INTR_CLEAR_OFF;
+		sde_irq->en_off = base_offset + MDP_LTM_INTR_EN_OFF;
+		sde_irq->status_off = base_offset + MDP_LTM_INTR_STATUS_OFF;
+		break;
+	default:
+		pr_err("unrecognized intr blk type %d\n",
+				item->type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
 static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
 {
 	ops->set_mask = sde_hw_intr_set_mask;
@@ -1050,143 +983,6 @@ static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
 	return &m->mdss[0];
 }
 
-static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
-	struct sde_intr_reg *sde_irq_tbl)
-{
-	int idx;
-	struct sde_intr_reg *sde_irq;
-
-	for (idx = 0; idx < irq_tbl_size; idx++) {
-		sde_irq = &sde_irq_tbl[idx];
-
-		switch (sde_irq->sde_irq_idx) {
-		case MDSS_INTR_SSPP_TOP0_INTR:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+INTR_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+INTR_STATUS;
-			break;
-		case MDSS_INTR_SSPP_TOP0_INTR2:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+INTR2_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+INTR2_STATUS;
-			break;
-		case MDSS_INTR_SSPP_TOP0_HIST_INTR:
-			sde_irq->clr_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
-			sde_irq->status_off =
-				MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_0_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_0_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_0_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_0_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_1_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_1_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_1_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_1_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_2_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_2_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_2_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_2_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_3_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_3_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_3_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_3_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_INTF_4_INTR:
-			sde_irq->clr_off =
-				MDP_INTF_4_OFF+INTF_INTR_CLEAR;
-			sde_irq->en_off =
-				MDP_INTF_4_OFF+INTF_INTR_EN;
-			sde_irq->status_off =
-				MDP_INTF_4_OFF+INTF_INTR_STATUS;
-			break;
-		case MDSS_INTR_AD4_0_INTR:
-			sde_irq->clr_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_AD4_1_INTR:
-			sde_irq->clr_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTF_TEAR_1_INTR:
-			sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_EN_OFF;
-			sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTF_TEAR_2_INTR:
-			sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_CLEAR_OFF;
-			sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_EN_OFF;
-			sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
-				MDP_INTF_TEAR_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_LTM_0_INTR:
-			sde_irq->clr_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_LTM_0_OFF + MDP_LTM_INTR_STATUS_OFF;
-			break;
-		case MDSS_INTR_LTM_1_INTR:
-			sde_irq->clr_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_CLEAR_OFF;
-			sde_irq->en_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_EN_OFF;
-			sde_irq->status_off =
-				MDP_LTM_1_OFF + MDP_LTM_INTR_STATUS_OFF;
-			break;
-		default:
-			pr_err("wrong irq idx %d\n",
-				sde_irq->sde_irq_idx);
-			return -EINVAL;
-		}
-
-		pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
-			idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
-			sde_irq->en_off, sde_irq->status_off);
-	}
-
-	return 0;
-}
-
 void sde_hw_intr_destroy(struct sde_hw_intr *intr)
 {
 	if (intr) {
@@ -1198,120 +994,110 @@ void sde_hw_intr_destroy(struct sde_hw_intr *intr)
 	}
 }
 
-static inline u32 _get_irq_map_size(int idx)
+static inline u32 _get_irq_map_size_top(enum sde_intr_top_intr inst)
 {
 	u32 ret = 0;
 
-	switch (idx) {
-	case MDSS_INTR_SSPP_TOP0_INTR:
+	switch (inst) {
+	case SDE_INTR_TOP_INTR:
 		ret = ARRAY_SIZE(sde_irq_intr_map);
 		break;
-	case MDSS_INTR_SSPP_TOP0_INTR2:
+	case SDE_INTR_TOP_INTR2:
 		ret = ARRAY_SIZE(sde_irq_intr2_map);
 		break;
-	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
+	case SDE_INTR_TOP_HIST_INTR:
 		ret = ARRAY_SIZE(sde_irq_hist_map);
 		break;
-	case MDSS_INTR_INTF_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf0_map);
-		break;
-	case MDSS_INTR_INTF_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_inf1_map);
-		break;
-	case MDSS_INTR_INTF_2_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf2_map);
-		break;
-	case MDSS_INTR_INTF_3_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf3_map);
-		break;
-	case MDSS_INTR_INTF_4_INTR:
-		ret = ARRAY_SIZE(sde_irq_inf4_map);
-		break;
-	case MDSS_INTR_AD4_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_ad4_0_map);
-		break;
-	case MDSS_INTR_AD4_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_ad4_1_map);
+	default:
+		pr_err("invalid top inst:%d\n", inst);
+	}
+
+	return ret;
+}
+
+static inline u32 _get_irq_map_size(struct sde_intr_irq_offsets *item)
+{
+	u32 ret = 0;
+
+	switch (item->type) {
+	case SDE_INTR_HWBLK_TOP:
+		ret = _get_irq_map_size_top(item->instance_idx);
 		break;
-	case MDSS_INTF_TEAR_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf1_te_map);
+	case SDE_INTR_HWBLK_INTF:
+		ret = ARRAY_SIZE(sde_irq_intf_map);
 		break;
-	case MDSS_INTF_TEAR_2_INTR:
-		ret = ARRAY_SIZE(sde_irq_intf2_te_map);
+	case SDE_INTR_HWBLK_AD4:
+		ret = ARRAY_SIZE(sde_irq_ad4_map);
 		break;
-	case MDSS_INTR_LTM_0_INTR:
-		ret = ARRAY_SIZE(sde_irq_ltm_0_map);
+	case SDE_INTR_HWBLK_INTF_TEAR:
+		ret = ARRAY_SIZE(sde_irq_intf_te_map);
 		break;
-	case MDSS_INTR_LTM_1_INTR:
-		ret = ARRAY_SIZE(sde_irq_ltm_1_map);
+	case SDE_INTR_HWBLK_LTM:
+		ret = ARRAY_SIZE(sde_irq_ltm_map);
 		break;
 	default:
-		pr_err("invalid idx:%d\n", idx);
+		pr_err("invalid type: %d\n", item->type);
 	}
 
 	return ret;
 }
 
-static inline struct sde_irq_type *_get_irq_map_addr(int idx)
+static inline struct sde_irq_type *_get_irq_map_addr_top(
+		enum sde_intr_top_intr inst)
 {
 	struct sde_irq_type *ret = NULL;
 
-	switch (idx) {
-	case MDSS_INTR_SSPP_TOP0_INTR:
+	switch (inst) {
+	case SDE_INTR_TOP_INTR:
 		ret = sde_irq_intr_map;
 		break;
-	case MDSS_INTR_SSPP_TOP0_INTR2:
+	case SDE_INTR_TOP_INTR2:
 		ret = sde_irq_intr2_map;
 		break;
-	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
+	case SDE_INTR_TOP_HIST_INTR:
 		ret = sde_irq_hist_map;
 		break;
-	case MDSS_INTR_INTF_0_INTR:
-		ret = sde_irq_intf0_map;
-		break;
-	case MDSS_INTR_INTF_1_INTR:
-		ret = sde_irq_inf1_map;
-		break;
-	case MDSS_INTR_INTF_2_INTR:
-		ret = sde_irq_intf2_map;
-		break;
-	case MDSS_INTR_INTF_3_INTR:
-		ret = sde_irq_intf3_map;
-		break;
-	case MDSS_INTR_INTF_4_INTR:
-		ret = sde_irq_inf4_map;
-		break;
-	case MDSS_INTR_AD4_0_INTR:
-		ret = sde_irq_ad4_0_map;
-		break;
-	case MDSS_INTR_AD4_1_INTR:
-		ret = sde_irq_ad4_1_map;
+	default:
+		pr_err("invalid top inst:%d\n", inst);
+	}
+
+	return ret;
+}
+
+static inline struct sde_irq_type *_get_irq_map_addr(
+		struct sde_intr_irq_offsets *item)
+{
+	struct sde_irq_type *ret = NULL;
+
+	switch (item->type) {
+	case SDE_INTR_HWBLK_TOP:
+		ret = _get_irq_map_addr_top(item->instance_idx);
 		break;
-	case MDSS_INTF_TEAR_1_INTR:
-		ret = sde_irq_intf1_te_map;
+	case SDE_INTR_HWBLK_INTF:
+		ret = sde_irq_intf_map;
 		break;
-	case MDSS_INTF_TEAR_2_INTR:
-		ret = sde_irq_intf2_te_map;
+	case SDE_INTR_HWBLK_AD4:
+		ret = sde_irq_ad4_map;
 		break;
-	case MDSS_INTR_LTM_0_INTR:
-		ret = sde_irq_ltm_0_map;
+	case SDE_INTR_HWBLK_INTF_TEAR:
+		ret = sde_irq_intf_te_map;
 		break;
-	case MDSS_INTR_LTM_1_INTR:
-		ret = sde_irq_ltm_1_map;
+	case SDE_INTR_HWBLK_LTM:
+		ret = sde_irq_ltm_map;
 		break;
 	default:
-		pr_err("invalid idx:%d\n", idx);
+		pr_err("invalid type: %d\n", item->type);
 	}
 
 	return ret;
 }
 
 static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
-	u32 irq_idx, u32 low_idx, u32 high_idx)
+	struct sde_intr_irq_offsets *item, u32 low_idx, u32 high_idx)
 {
 	int i, j = 0;
-	struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
-	u32 src_size = _get_irq_map_size(irq_idx);
+	struct sde_irq_type *src = _get_irq_map_addr(item);
+	u32 src_size = _get_irq_map_size(item);
 
 	if (!src)
 		return -EINVAL;
@@ -1332,61 +1118,60 @@ static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
 static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
 	struct sde_mdss_cfg *m)
 {
+	struct sde_intr_irq_offsets *item;
 	int i, idx, sde_irq_tbl_idx = 0, ret = 0;
 	u32 low_idx, high_idx;
 	u32 sde_irq_map_idx = 0;
 
-	/* Initialize the offset of the irq's in the sde_irq_map table */
-	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
-		if (test_bit(idx, m->mdss_irqs)) {
-			low_idx = sde_irq_map_idx;
-			high_idx = low_idx + _get_irq_map_size(idx);
+	/* Initialize offsets in the sde_irq_map & sde_irq_tbl tables */
+	list_for_each_entry(item, &m->irq_offset_list, list) {
+		low_idx = sde_irq_map_idx;
+		high_idx = low_idx + _get_irq_map_size(item);
 
-			pr_debug("init[%d]=%d low:%d high:%d\n",
-				sde_irq_tbl_idx, idx, low_idx, high_idx);
+		pr_debug("init[%d]=%d low:%d high:%d\n",
+			sde_irq_tbl_idx, idx, low_idx, high_idx);
 
-			if (sde_irq_tbl_idx >= intr->sde_irq_size ||
-				sde_irq_tbl_idx < 0) {
-				ret = -EINVAL;
-				goto exit;
-			}
-
-			/* init sde_irq_map with the global irq mapping table */
-			if (_sde_copy_regs(intr->sde_irq_map,
-					intr->sde_irq_map_size,
-					idx, low_idx, high_idx)) {
-				ret = -EINVAL;
-				goto exit;
-			}
+		if (sde_irq_tbl_idx >= intr->sde_irq_size ||
+			sde_irq_tbl_idx < 0) {
+			ret = -EINVAL;
+			goto exit;
+		}
 
-			/* init irq map with its reg idx within the irq tbl */
-			for (i = low_idx; i < high_idx; i++) {
-				intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
-				pr_debug("sde_irq_map[%d].reg_idx=%d\n",
-						i, sde_irq_tbl_idx);
-			}
+		/* init sde_irq_map with the global irq mapping table */
+		if (_sde_copy_regs(intr->sde_irq_map, intr->sde_irq_map_size,
+				item, low_idx, high_idx)) {
+			ret = -EINVAL;
+			goto exit;
+		}
 
-			/* track the idx of the mapping table for this irq in
-			 * sde_irq_map, this to only access the indexes of this
-			 * irq during the irq dispatch
-			 */
-			intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
-			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
-				low_idx;
-			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
-				high_idx;
-
-			/* increment idx for both tables accordingly */
-			sde_irq_tbl_idx++;
-			sde_irq_map_idx = high_idx;
+		/* init irq map with its reg & instance idxs in the irq tbl */
+		for (i = low_idx; i < high_idx; i++) {
+			intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
+			if (item->type != SDE_INTR_HWBLK_TOP)
+				intr->sde_irq_map[i].instance_idx =
+						item->instance_idx;
+			pr_debug("sde_irq_map[%d].reg_idx=%d .inst_idx = %d\n",
+				i, sde_irq_tbl_idx, item->instance_idx);
 		}
-	}
 
-	/* do this after 'sde_irq_idx is initialized in sde_irq_tbl */
-	ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
-			intr->sde_irq_tbl);
+		/* track the idx of the mapping table for this irq in
+		 * sde_irq_map, this to only access the indexes of this
+		 * irq during the irq dispatch
+		 */
+		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start = low_idx;
+		intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end = high_idx;
+		ret = _set_sde_irq_tbl_offset(
+				&intr->sde_irq_tbl[sde_irq_tbl_idx], item);
+		if (ret)
+			goto exit;
+
+		/* increment idx for both tables accordingly */
+		sde_irq_tbl_idx++;
+		sde_irq_map_idx = high_idx;
+	}
 
 exit:
+	sde_hw_catalog_irq_offset_list_delete(&m->irq_offset_list);
 	return ret;
 }
 
@@ -1395,10 +1180,10 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
 {
 	struct sde_hw_intr *intr = NULL;
 	struct sde_mdss_base_cfg *cfg;
+	struct sde_intr_irq_offsets *item;
 	u32 irq_regs_count = 0;
 	u32 irq_map_count = 0;
 	u32 size;
-	int idx;
 	int ret = 0;
 
 	if (!addr || !m) {
@@ -1419,33 +1204,24 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
 	}
 	__setup_intr_ops(&intr->ops);
 
-	if (MDSS_INTR_MAX >= UINT_MAX) {
-		pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
-		ret  = -EINVAL;
-		goto exit;
-	}
-
 	/* check how many irq's this target supports */
-	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
-		if (test_bit(idx, m->mdss_irqs)) {
-			irq_regs_count++;
-
-			size = _get_irq_map_size(idx);
-			if (!size || irq_map_count >= UINT_MAX - size) {
-				pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
-					idx, size, irq_map_count);
-				ret = -EINVAL;
-				goto exit;
-			}
-
-			irq_map_count += size;
+	list_for_each_entry(item, &m->irq_offset_list, list) {
+		size = _get_irq_map_size(item);
+		if (!size || irq_map_count >= UINT_MAX - size) {
+			pr_err("wrong map cnt idx:%d blk:%d/%d sz:%d cnt:%d\n",
+				irq_regs_count, item->type, item->instance_idx,
+				size, irq_map_count);
+			ret = -EINVAL;
+			goto exit;
 		}
+
+		irq_regs_count++;
+		irq_map_count += size;
 	}
 
-	if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
-		irq_map_count == 0) {
-		pr_err("wrong mapping of supported irqs 0x%lx\n",
-			m->mdss_irqs[0]);
+	if (irq_regs_count == 0 || irq_map_count == 0) {
+		pr_err("invalid irq map: %d %d\n",
+				irq_regs_count, irq_map_count);
 		ret = -EINVAL;
 		goto exit;
 	}

msm/sde/sde_hw_interrupts.h  (+19, -0)

@@ -314,4 +314,23 @@ struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
  * @intr: pointer to interrupts hw object
  */
 void sde_hw_intr_destroy(struct sde_hw_intr *intr);
+
+/**
+ * sde_hw_intr_list_lookup(): get the list entry for a given intr
+ * @sde_cfg: catalog containing the irq_offset_list
+ * @type: the sde_intr_hwblk_type to lookup
+ * @idx: the instance id to lookup for the specified hwblk_type
+ * @return: pointer to sde_intr_irq_offsets list entry, or NULL if lookup fails
+ */
+static inline struct sde_intr_irq_offsets *sde_hw_intr_list_lookup(
+	struct sde_mdss_cfg *sde_cfg, enum sde_intr_hwblk_type type, u32 idx)
+{
+	struct sde_intr_irq_offsets *item;
+
+	list_for_each_entry(item, &sde_cfg->irq_offset_list, list) {
+		if (type == item->type && idx == item->instance_idx)
+			return item;
+	}
+	return NULL;
+}
 #endif
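
The lookup helper above is what _add_to_irq_offset_list() uses for duplicate detection, and it can equally be used to fetch a block's register base once the catalog is parsed. A minimal usage sketch, assuming irq_offset_list has already been populated; the function name is illustrative:

/* Return the tear-check IRQ register base for interface 1, or 0 when the
 * target's device tree did not declare one.
 */
static u32 example_intf1_tear_base(struct sde_mdss_cfg *cfg)
{
	struct sde_intr_irq_offsets *item;

	item = sde_hw_intr_list_lookup(cfg, SDE_INTR_HWBLK_INTF_TEAR, INTF_1);

	return item ? item->base_offset : 0;
}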

msm/sde/sde_hw_lm.c  (+6, -11)

@@ -110,8 +110,9 @@ static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
 	}
 }
 
-static void sde_hw_lm_setup_blend_config_sdm845(struct sde_hw_mixer *ctx,
-	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+static void sde_hw_lm_setup_blend_config_combined_alpha(
+	struct sde_hw_mixer *ctx, u32 stage,
+	u32 fg_alpha, u32 bg_alpha, u32 blend_op)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	int stage_off;
@@ -280,15 +281,9 @@ static void _setup_mixer_ops(struct sde_mdss_cfg *m,
 		unsigned long features)
 {
 	ops->setup_mixer_out = sde_hw_lm_setup_out;
-	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) ||
-			IS_SM8150_TARGET(m->hwversion) ||
-			IS_SDMSHRIKE_TARGET(m->hwversion) ||
-			IS_SM6150_TARGET(m->hwversion) ||
-			IS_SDMMAGPIE_TARGET(m->hwversion) ||
-			IS_KONA_TARGET(m->hwversion) ||
-			IS_SAIPAN_TARGET(m->hwversion) ||
-			IS_SDMTRINKET_TARGET(m->hwversion))
-		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
+	if (test_bit(SDE_MIXER_COMBINED_ALPHA, &features))
+		ops->setup_blend_config =
+				sde_hw_lm_setup_blend_config_combined_alpha;
 	else
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
 	ops->setup_alpha_out = sde_hw_lm_setup_color3;
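
The mixer hunk above replaces a chain of IS_XXX_TARGET() revision checks with a single catalog capability bit, and the VBIF hunk below applies the same conversion for SDE_VBIF_DISABLE_SHAREABLE. The pattern, sketched with hypothetical ops and feature names:

#include <linux/bitops.h>
#include <linux/types.h>

enum { EXAMPLE_COMBINED_ALPHA = 0 };

struct example_ops {
	void (*setup_blend)(void *ctx, u32 fg_alpha, u32 bg_alpha);
};

static void example_blend_split(void *ctx, u32 fg_alpha, u32 bg_alpha) { }
static void example_blend_combined(void *ctx, u32 fg_alpha, u32 bg_alpha) { }

/* pick the function pointer from a feature bit instead of comparing
 * hardware revisions
 */
static void example_setup_ops(struct example_ops *ops, unsigned long features)
{
	if (test_bit(EXAMPLE_COMBINED_ALPHA, &features))
		ops->setup_blend = example_blend_combined;
	else
		ops->setup_blend = example_blend_split;
}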

msm/sde/sde_hw_sspp.c  (+4, -7)

@@ -3,7 +3,6 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-#include "sde_hw_util.h"
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_lm.h"
@@ -299,7 +298,7 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
 	u32 opmode = 0;
 	u32 alpha_en_mask = 0, color_en_mask = 0;
 	u32 op_mode_off, unpack_pat_off, format_off;
-	u32 idx, core_rev;
+	u32 idx;
 	bool const_color_en = true;
 
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
@@ -316,7 +315,6 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
 	}
 
 	c = &ctx->hw;
-	core_rev = readl_relaxed(c->base_off + 0x0);
 	opmode = SDE_REG_READ(c, op_mode_off + idx);
 	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
 			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
@@ -354,10 +352,9 @@ static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
 		(fmt->unpack_align_msb << 18) |
 		((fmt->bpp - 1) << 9);
 
-	if(IS_SDE_MAJOR_SAME(core_rev, SDE_HW_VER_600)) {
-		if(flags & SDE_SSPP_ROT_90)
-			const_color_en = false;
-	}
+	if ((flags & SDE_SSPP_ROT_90) && test_bit(SDE_SSPP_INLINE_CONST_CLR,
+			&ctx->cap->features))
+		const_color_en = false;
 
 	if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
 		if (SDE_FORMAT_IS_UBWC(fmt))

+ 1 - 3
msm/sde/sde_hw_vbif.c

@@ -234,9 +234,7 @@ static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
 	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
 	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
 		ops->set_qos_remap = sde_hw_set_qos_remap;
-	if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
-			IS_SDMMAGPIE_TARGET(m->hwversion) ||
-			IS_SDMTRINKET_TARGET(m->hwversion))
+	if (test_bit(SDE_VBIF_DISABLE_SHAREABLE, &cap))
 		ops->set_mem_type = sde_hw_set_mem_type_v1;
 	else
 		ops->set_mem_type = sde_hw_set_mem_type;

+ 45 - 163
msm/sde/sde_kms.c

@@ -25,7 +25,8 @@
 #include <linux/of_irq.h>
 #include <linux/dma-buf.h>
 #include <linux/memblock.h>
-#include <linux/bootmem.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -50,7 +51,7 @@
 
 #include <soc/qcom/scm.h>
 #include "soc/qcom/secure_buffer.h"
-#include "soc/qcom/qtee_shmbridge.h"
+#include <linux/qtee_shmbridge.h>
 
 #define CREATE_TRACE_POINTS
 #include "sde_trace.h"
@@ -271,6 +272,7 @@ static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
  */
 static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
 {
+	struct drm_device *dev;
 	struct scm_desc desc = {0};
 	uint32_t num_sids;
 	uint32_t *sec_sid;
@@ -280,6 +282,8 @@ static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
 	struct qtee_shm shm;
 	bool qtee_en = qtee_shmbridge_is_enabled();
 
+	dev = sde_kms->dev;
+
 	num_sids = sde_cfg->sec_sid_mask_count;
 	if (!num_sids) {
 		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
@@ -312,7 +316,8 @@ static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
 		sec_sid[i] = sde_cfg->sec_sid_mask[i];
 		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
 	}
-	dmac_flush_range(sec_sid, sec_sid + num_sids);
+	dma_map_single(dev->dev, sec_sid, num_sids * sizeof(uint32_t),
+			DMA_TO_DEVICE);
 
 	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d",
 				vmid, num_sids, qtee_en);
@@ -506,7 +511,7 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
 		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
 		if (ret) {
-			smmu_state->sui_misr_state == NONE;
+			smmu_state->sui_misr_state = NONE;
 			goto end;
 		}
 	}
@@ -1862,141 +1867,6 @@ static void sde_kms_destroy(struct msm_kms *kms)
 	kfree(sde_kms);
 }
 
-static void _sde_kms_plane_force_remove(struct drm_plane *plane,
-			struct drm_atomic_state *state)
-{
-	struct drm_plane_state *plane_state;
-	int ret = 0;
-
-	plane_state = drm_atomic_get_plane_state(state, plane);
-	if (IS_ERR(plane_state)) {
-		ret = PTR_ERR(plane_state);
-		SDE_ERROR("error %d getting plane %d state\n",
-				ret, plane->base.id);
-		return;
-	}
-
-	plane->old_fb = plane->fb;
-
-	SDE_DEBUG("disabling plane %d\n", plane->base.id);
-
-	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
-	if (ret != 0)
-		SDE_ERROR("error %d disabling plane %d\n", ret,
-				plane->base.id);
-}
-
-static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
-		struct drm_atomic_state *state)
-{
-	struct drm_device *dev = sde_kms->dev;
-	struct drm_framebuffer *fb, *tfb;
-	struct list_head fbs;
-	struct drm_plane *plane;
-	int ret = 0;
-	u32 plane_mask = 0;
-
-	INIT_LIST_HEAD(&fbs);
-
-	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
-		if (drm_framebuffer_read_refcount(fb) > 1) {
-			list_move_tail(&fb->filp_head, &fbs);
-
-			drm_for_each_plane(plane, dev) {
-				if (plane->fb == fb) {
-					plane_mask |=
-						1 << drm_plane_index(plane);
-					 _sde_kms_plane_force_remove(
-								plane, state);
-				}
-			}
-		} else {
-			list_del_init(&fb->filp_head);
-			drm_framebuffer_put(fb);
-		}
-	}
-
-	if (list_empty(&fbs)) {
-		SDE_DEBUG("skip commit as no fb(s)\n");
-		drm_atomic_state_put(state);
-		return 0;
-	}
-
-	SDE_DEBUG("committing after removing all the pipes\n");
-	ret = drm_atomic_commit(state);
-
-	if (ret) {
-		/*
-		 * move the fbs back to original list, so it would be
-		 * handled during drm_release
-		 */
-		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
-			list_move_tail(&fb->filp_head, &file->fbs);
-
-		SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
-		goto end;
-	}
-
-	while (!list_empty(&fbs)) {
-		fb = list_first_entry(&fbs, typeof(*fb), filp_head);
-
-		list_del_init(&fb->filp_head);
-		drm_framebuffer_put(fb);
-	}
-
-end:
-	return ret;
-}
-
-static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	unsigned int i;
-	struct drm_atomic_state *state = NULL;
-	struct drm_modeset_acquire_ctx ctx;
-	int ret = 0;
-
-	/* cancel pending flip event */
-	for (i = 0; i < priv->num_crtcs; i++)
-		sde_crtc_complete_flip(priv->crtcs[i], file);
-
-	drm_modeset_acquire_init(&ctx, 0);
-retry:
-	ret = drm_modeset_lock_all_ctx(dev, &ctx);
-	if (ret == -EDEADLK) {
-		drm_modeset_backoff(&ctx);
-		goto retry;
-	} else if (WARN_ON(ret)) {
-		goto end;
-	}
-
-	state = drm_atomic_state_alloc(dev);
-	if (!state) {
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	state->acquire_ctx = &ctx;
-
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		ret = _sde_kms_remove_fbs(sde_kms, file, state);
-		if (ret != -EDEADLK)
-			break;
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(&ctx);
-	}
-
-end:
-	if (state)
-		drm_atomic_state_put(state);
-
-	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
-	drm_modeset_drop_locks(&ctx);
-	drm_modeset_acquire_fini(&ctx);
-}
-
 static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
 		struct drm_atomic_state *state)
 {
@@ -2067,13 +1937,13 @@ static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
 	return ret;
 }
 
-static void sde_kms_lastclose(struct msm_kms *kms,
-		struct drm_modeset_acquire_ctx *ctx)
+static void sde_kms_lastclose(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms;
 	struct drm_device *dev;
 	struct drm_atomic_state *state;
-	int ret, i;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret;
 
 	if (!kms) {
 		SDE_ERROR("invalid argument\n");
@@ -2082,32 +1952,45 @@ static void sde_kms_lastclose(struct msm_kms *kms,
 
 	sde_kms = to_sde_kms(kms);
 	dev = sde_kms->dev;
+	drm_modeset_acquire_init(&ctx, 0);
 
 	state = drm_atomic_state_alloc(dev);
-	if (!state)
-		return;
+	if (!state) {
+		ret = -ENOMEM;
+		goto out_ctx;
+	}
 
-	state->acquire_ctx = ctx;
+	state->acquire_ctx = &ctx;
 
-	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
-		/* add reset of custom properties to the state */
-		ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
-		if (ret)
-			break;
+retry:
+	ret = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (ret)
+		goto out_state;
 
-		ret = drm_atomic_commit(state);
-		if (ret != -EDEADLK)
-			break;
+	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
+	if (ret)
+		goto out_state;
 
-		drm_atomic_state_clear(state);
-		drm_modeset_backoff(ctx);
-		SDE_DEBUG("deadlock backoff on attempt %d\n", i);
-	}
+	ret = drm_atomic_commit(state);
+out_state:
+	if (ret == -EDEADLK)
+		goto backoff;
+
+	drm_atomic_state_put(state);
+out_ctx:
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
 
 	if (ret)
-		SDE_ERROR("failed to run last close: %d\n", ret);
+		SDE_ERROR("kms lastclose failed: %d\n", ret);
 
-	drm_atomic_state_put(state);
+	return;
+
+backoff:
+	drm_atomic_state_clear(state);
+	drm_modeset_backoff(&ctx);
+
+	goto retry;
 }
 
 static int sde_kms_check_secure_transition(struct msm_kms *kms,
@@ -2467,9 +2350,9 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 		/* currently consider modes[0] as the preferred mode */
 		drm_mode = list_first_entry(&connector->modes,
 				struct drm_display_mode, head);
-		SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
-				drm_mode->name, drm_mode->base.id,
-				drm_mode->type, drm_mode->flags);
+		SDE_DEBUG("drm_mode->name = %s, type=0x%x, flags=0x%x\n",
+				drm_mode->name, drm_mode->type,
+				drm_mode->flags);
 
 		/* Update CRTC drm structure */
 		crtc->state->active = true;
@@ -2881,7 +2764,6 @@ static const struct msm_kms_funcs kms_funcs = {
 	.irq_postinstall = sde_irq_postinstall,
 	.irq_uninstall   = sde_irq_uninstall,
 	.irq             = sde_irq,
-	.preclose        = sde_kms_preclose,
 	.lastclose       = sde_kms_lastclose,
 	.prepare_fence   = sde_kms_prepare_fence,
 	.prepare_commit  = sde_kms_prepare_commit,

+ 3 - 6
msm/sde/sde_plane.c

@@ -1406,6 +1406,7 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde,
 {
 	struct sde_hw_pixel_ext *pe;
 	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+	const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
 
 	if (!psde || !fmt || !pstate) {
 		SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
@@ -1421,10 +1422,8 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde,
 		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
 
 	/* don't chroma subsample if decimating */
-	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
-		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
-	chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 :
-		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 : info->hsub;
+	chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 : info->vsub;
 
 	/* update scaler */
 	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3) ||
@@ -4007,8 +4006,6 @@ static void sde_plane_destroy(struct drm_plane *plane)
 		msm_property_destroy(&psde->property_info);
 		mutex_destroy(&psde->lock);
 
-		drm_plane_helper_disable(plane, NULL);
-
 		/* this will destroy the states as well */
 		drm_plane_cleanup(plane);
 

+ 1 - 1
msm/sde/sde_rm.c

@@ -1951,7 +1951,7 @@ static void _sde_rm_release_rsvp(
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt)
 {
 	struct sde_rm_rsvp *rsvp;
-	struct drm_connector *conn;
+	struct drm_connector *conn = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	uint64_t top_ctrl;

+ 1 - 0
msm/sde/sde_wb.c

@@ -6,6 +6,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
 #include <uapi/drm/sde_drm.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_kms.h"
 #include "sde_kms.h"

+ 101 - 229
msm/sde_power_handle.c

@@ -14,8 +14,6 @@
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
 #include <linux/sde_io_util.h>
 #include <linux/sde_rsc.h>
 
@@ -23,6 +21,13 @@
 #include "sde_trace.h"
 #include "sde_dbg.h"
 
+static const struct sde_power_bus_scaling_data sde_reg_bus_table[] = {
+	{0, 0},
+	{0, 76800},
+	{0, 150000},
+	{0, 300000},
+};
+
 static const char *data_bus_name[SDE_POWER_HANDLE_DBUS_ID_MAX] = {
 	[SDE_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,sde-data-bus",
 	[SDE_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,sde-llcc-bus",
@@ -277,67 +282,49 @@ clk_err:
 	return rc;
 }
 
-#ifdef CONFIG_QCOM_BUS_SCALING
-
 #define MAX_AXI_PORT_COUNT 3
 
 static int _sde_power_data_bus_set_quota(
 	struct sde_power_data_bus_handle *pdbus,
 	u64 in_ab_quota, u64 in_ib_quota)
 {
-	int new_uc_idx;
-	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
-	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
-	int rc;
+	int rc = 0, i = 0, j = 0;
 
-	if (pdbus->data_bus_hdl < 1) {
-		pr_err("invalid bus handle %d\n", pdbus->data_bus_hdl);
+	if (!pdbus->data_paths_cnt) {
+		pr_err("invalid data bus handle\n");
 		return -EINVAL;
 	}
 
-	if (!in_ab_quota && !in_ib_quota)  {
-		new_uc_idx = 0;
-	} else {
-		int i;
-		struct msm_bus_vectors *vect = NULL;
-		struct msm_bus_scale_pdata *bw_table =
-			pdbus->data_bus_scale_table;
-		u32 total_data_paths_cnt = pdbus->data_paths_cnt;
-
-		if (!bw_table || !total_data_paths_cnt ||
-		    total_data_paths_cnt > MAX_AXI_PORT_COUNT) {
-			pr_err("invalid input\n");
-			return -EINVAL;
-		}
+	pr_debug("ab=%llu ib=%llu\n", in_ab_quota, in_ib_quota);
 
-		ab_quota[0] = div_u64(in_ab_quota, total_data_paths_cnt);
-		ib_quota[0] = div_u64(in_ib_quota, total_data_paths_cnt);
+	in_ab_quota = div_u64(in_ab_quota, pdbus->data_paths_cnt);
 
-		for (i = 1; i < total_data_paths_cnt; i++) {
-			ab_quota[i] = ab_quota[0];
-			ib_quota[i] = ib_quota[0];
+	SDE_ATRACE_BEGIN("msm_bus_scale_req");
+	for (i = 0; i < pdbus->data_paths_cnt; i++) {
+		if (pdbus->data_bus_hdl[i]) {
+			rc = icc_set_bw(pdbus->data_bus_hdl[i],
+				in_ab_quota, in_ib_quota);
+			if (rc)
+				goto err;
 		}
+	}
 
-		new_uc_idx = (pdbus->curr_bw_uc_idx %
-			(bw_table->num_usecases - 1)) + 1;
+	pdbus->curr_val.ab = in_ab_quota;
+	pdbus->curr_val.ib = in_ib_quota;
 
-		for (i = 0; i < total_data_paths_cnt; i++) {
-			vect = &bw_table->usecase[new_uc_idx].vectors[i];
-			vect->ab = ab_quota[i];
-			vect->ib = ib_quota[i];
+	SDE_ATRACE_END("msm_bus_scale_req");
 
-			pr_debug(
-				"%s uc_idx=%d idx=%d ab=%llu ib=%llu\n",
-				bw_table->name, new_uc_idx, i, vect->ab,
-				vect->ib);
-		}
-	}
-	pdbus->curr_bw_uc_idx = new_uc_idx;
+	return rc;
+err:
+	for (j = 0; j < i; j++)
+		if (pdbus->data_bus_hdl[j])
+			icc_set_bw(pdbus->data_bus_hdl[j],
+				   pdbus->curr_val.ab,
+				   pdbus->curr_val.ib);
 
-	SDE_ATRACE_BEGIN("msm_bus_scale_req");
-	rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
-			new_uc_idx);
 	SDE_ATRACE_END("msm_bus_scale_req");
+	pr_err("failed to set data bus vote ab=%llu ib=%llu rc=%d\n",
+	       in_ab_quota, in_ib_quota, rc);
 
 	return rc;
 }
@@ -356,7 +343,7 @@ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
 
 	trace_sde_perf_update_bus(bus_id, ab_quota, ib_quota);
 
-	if (phandle->data_bus_handle[bus_id].data_bus_hdl)
+	if (phandle->data_bus_handle[bus_id].data_paths_cnt > 0)
 		rc = _sde_power_data_bus_set_quota(
 			&phandle->data_bus_handle[bus_id], ab_quota, ib_quota);
 
@@ -368,96 +355,96 @@ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
 static void sde_power_data_bus_unregister(
 		struct sde_power_data_bus_handle *pdbus)
 {
-	if (pdbus->data_bus_hdl) {
-		msm_bus_scale_unregister_client(pdbus->data_bus_hdl);
-		pdbus->data_bus_hdl = 0;
+	int i = 0;
+
+	for (i = 0; i < pdbus->data_paths_cnt; i++) {
+		if (pdbus->data_bus_hdl[i]) {
+			icc_put(pdbus->data_bus_hdl[i]);
+			pdbus->data_bus_hdl[i] = NULL;
+		}
 	}
 }
 
 static int sde_power_data_bus_parse(struct platform_device *pdev,
 	struct sde_power_data_bus_handle *pdbus, const char *name)
 {
-	struct device_node *node;
-	int rc = 0;
-	int paths;
-
-	node = of_get_child_by_name(pdev->dev.of_node, name);
-	if (!node)
-		goto end;
-
-	rc = of_property_read_u32(node, "qcom,msm-bus,num-paths", &paths);
-	if (rc) {
-		pr_err("Error. qcom,msm-bus,num-paths not found\n");
-		return rc;
+	char bus_name[32];
+	int i = 0, ret = 0;
+
+	for (i = 0; i < DATA_BUS_PATH_MAX; i++) {
+		snprintf(bus_name, sizeof(bus_name), "%s%d", name, i);
+		ret = of_property_match_string(pdev->dev.of_node,
+			"interconnect-names", bus_name);
+		if (ret < 0) {
+			if (!pdbus->data_paths_cnt) {
+				pr_debug("sde: bus %s dt node missing\n", bus_name);
+				return 0;
+			} else
+				goto end;
+		} else
+			pdbus->data_bus_hdl[i] = of_icc_get(&pdev->dev, bus_name);
+
+		if (IS_ERR_OR_NULL(pdbus->data_bus_hdl[i])) {
+			pr_debug("icc get path failed for %s\n", bus_name);
+			break;
+		}
+		pdbus->data_paths_cnt++;
 	}
-	pdbus->data_paths_cnt = paths;
 
-	pdbus->data_bus_scale_table = msm_bus_pdata_from_node(pdev, node);
-	if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
-		pr_err("reg bus handle parsing failed\n");
-		rc = PTR_ERR(pdbus->data_bus_scale_table);
-		if (!pdbus->data_bus_scale_table)
-			rc = -EINVAL;
-		goto end;
-	}
-	pdbus->data_bus_hdl = msm_bus_scale_register_client(
-			pdbus->data_bus_scale_table);
-	if (!pdbus->data_bus_hdl) {
-		pr_err("data_bus_client register failed\n");
-		rc = -EINVAL;
-		goto end;
+	if (!pdbus->data_paths_cnt) {
+		pr_err("no data bus path found for %s\n", name);
+		return -EINVAL;
 	}
-	pr_debug("register %s data_bus_hdl=%x\n", name, pdbus->data_bus_hdl);
 
 end:
-	return rc;
+	if (of_find_property(pdev->dev.of_node,
+			     "qcom,msm-bus,active-only", NULL)) {
+		pdbus->bus_active_only = true;
+		for (i = 0; i < pdbus->data_paths_cnt; i++) {
+			icc_set_tag(pdbus->data_bus_hdl[i],
+				    QCOM_ICC_TAG_ACTIVE_ONLY);
+		}
+	}
+
+	pr_debug("registered %s data bus, number of paths=%d\n",
+		name, pdbus->data_paths_cnt);
+
+	return 0;
 }
 
 static int sde_power_reg_bus_parse(struct platform_device *pdev,
 	struct sde_power_handle *phandle)
 {
-	struct device_node *node;
-	struct msm_bus_scale_pdata *bus_scale_table;
 	int rc = 0;
 
-	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-reg-bus");
-	if (node) {
-		bus_scale_table = msm_bus_pdata_from_node(pdev, node);
-		if (IS_ERR_OR_NULL(bus_scale_table)) {
-			pr_err("reg bus handle parsing failed\n");
-			rc = PTR_ERR(bus_scale_table);
-			if (!bus_scale_table)
-				rc = -EINVAL;
-			goto end;
-		}
-		phandle->reg_bus_hdl = msm_bus_scale_register_client(
-			      bus_scale_table);
-		if (!phandle->reg_bus_hdl) {
-			pr_err("reg_bus_client register failed\n");
-			rc = -EINVAL;
-			goto end;
-		}
-		pr_debug("register reg_bus_hdl=%x\n", phandle->reg_bus_hdl);
+	phandle->reg_bus_hdl = of_icc_get(&pdev->dev, "qcom,sde-reg-bus");
+	if (IS_ERR_OR_NULL(phandle->reg_bus_hdl)) {
+		pr_err("reg bus handle parsing failed\n");
+		phandle->reg_bus_hdl = NULL;
+		rc = -EINVAL;
+	} else {
+		pr_debug("reg_bus_hdl parsing success\n");
 	}
 
-end:
 	return rc;
 }
 
-static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+static void sde_power_reg_bus_unregister(struct icc_path *reg_bus_hdl)
 {
 	if (reg_bus_hdl)
-		msm_bus_scale_unregister_client(reg_bus_hdl);
+		icc_put(reg_bus_hdl);
 }
 
-static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+static int sde_power_reg_bus_update(struct icc_path *reg_bus_hdl,
+	u32 usecase_ndx)
 {
 	int rc = 0;
 
 	if (reg_bus_hdl) {
 		SDE_ATRACE_BEGIN("msm_bus_scale_req");
-		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
-								usecase_ndx);
+		rc = icc_set_bw(reg_bus_hdl,
+			sde_reg_bus_table[usecase_ndx].ab,
+			sde_reg_bus_table[usecase_ndx].ib);
 		SDE_ATRACE_END("msm_bus_scale_req");
 	}
 
@@ -466,52 +453,6 @@ static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
 
 	return rc;
 }
-#else
-static int _sde_power_data_bus_set_quota(
-	struct sde_power_data_bus_handle *pdbus,
-	u64 in_ab_quota, u64 in_ib_quota)
-{
-	return 0;
-}
-
-static int sde_power_data_bus_parse(struct platform_device *pdev,
-		struct sde_power_data_bus_handle *pdbus, const char *name)
-{
-	return 0;
-}
-
-static void sde_power_data_bus_unregister(
-		struct sde_power_data_bus_handle *pdbus)
-{
-}
-
-int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
-	u32 bus_id, u64 ab_quota, u64 ib_quota)
-{
-	return 0;
-}
-
-static int sde_power_reg_bus_parse(struct platform_device *pdev,
-	struct sde_power_handle *phandle)
-{
-	return 0;
-}
-
-static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
-{
-}
-
-static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
-{
-	return 0;
-}
-
-int sde_power_data_bus_state_update(struct sde_power_handle *phandle,
-							bool enable)
-{
-	return 0;
-}
-#endif
 
 int sde_power_resource_init(struct platform_device *pdev,
 	struct sde_power_handle *phandle)
@@ -576,12 +517,6 @@ int sde_power_resource_init(struct platform_device *pdev,
 		}
 	}
 
-	if (of_find_property(pdev->dev.of_node, "qcom,dss-cx-ipeak", NULL))
-		phandle->dss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node,
-						"qcom,dss-cx-ipeak");
-	else
-		pr_debug("cx ipeak client parse failed\n");
-
 	INIT_LIST_HEAD(&phandle->event_list);
 
 	phandle->rsc_client = NULL;
@@ -635,9 +570,6 @@ void sde_power_resource_deinit(struct platform_device *pdev,
 	}
 	mutex_unlock(&phandle->phandle_lock);
 
-	if (phandle->dss_cx_ipeak)
-		cx_ipeak_unregister(phandle->dss_cx_ipeak);
-
 	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
 		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
 
@@ -675,6 +607,13 @@ int sde_power_scale_reg_bus(struct sde_power_handle *phandle,
 						usecase_ndx);
 	if (rc)
 		pr_err("failed to set reg bus vote rc=%d\n", rc);
+	else {
+		phandle->reg_bus_curr_val.ab =
+			sde_reg_bus_table[usecase_ndx].ab;
+		phandle->reg_bus_curr_val.ib =
+			sde_reg_bus_table[usecase_ndx].ib;
+		phandle->current_usecase_ndx = usecase_ndx;
+	}
 
 	if (!skip_lock)
 		mutex_unlock(&phandle->phandle_lock);
@@ -723,7 +662,7 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, bool enable)
 				SDE_POWER_EVENT_PRE_ENABLE);
 
 		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX &&
-		     phandle->data_bus_handle[i].data_bus_hdl; i++) {
+			phandle->data_bus_handle[i].data_paths_cnt > 0; i++) {
 			rc = _sde_power_data_bus_set_quota(
 				&phandle->data_bus_handle[i],
 				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
@@ -777,7 +716,7 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, bool enable)
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
 
 		for (i = SDE_POWER_HANDLE_DBUS_ID_MAX - 1; i >= 0; i--)
-			if (phandle->data_bus_handle[i].data_bus_hdl)
+			if (phandle->data_bus_handle[i].data_paths_cnt > 0)
 				_sde_power_data_bus_set_quota(
 					&phandle->data_bus_handle[i],
 					SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA,
@@ -799,7 +738,7 @@ rsc_err:
 reg_bus_hdl_err:
 	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
-	for (i-- ; i >= 0 && phandle->data_bus_handle[i].data_bus_hdl; i--)
+	for (i-- ; i >= 0 && phandle->data_bus_handle[i].data_paths_cnt > 0; i--)
 		_sde_power_data_bus_set_quota(
 			&phandle->data_bus_handle[i],
 			SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA,
@@ -809,47 +748,11 @@ vreg_err:
 	return rc;
 }
 
-int sde_cx_ipeak_vote(struct sde_power_handle *phandle, struct dss_clk *clock,
-		u64 requested_clk_rate, u64 prev_clk_rate, bool enable_vote)
-{
-	int ret = 0;
-	u64 curr_core_clk_rate, max_core_clk_rate, prev_core_clk_rate;
-
-	if (!phandle->dss_cx_ipeak) {
-		pr_debug("%pS->%s: Invalid input\n",
-				__builtin_return_address(0), __func__);
-		return -EOPNOTSUPP;
-	}
-
-	if (strcmp("core_clk", clock->clk_name)) {
-		pr_debug("Not a core clk , cx_ipeak vote not needed\n");
-		return -EOPNOTSUPP;
-	}
-
-	curr_core_clk_rate = clock->rate;
-	max_core_clk_rate = clock->max_rate;
-	prev_core_clk_rate = prev_clk_rate;
-
-	if (enable_vote && requested_clk_rate == max_core_clk_rate &&
-				curr_core_clk_rate != requested_clk_rate)
-		ret = cx_ipeak_update(phandle->dss_cx_ipeak, true);
-	else if (!enable_vote && requested_clk_rate != max_core_clk_rate &&
-				prev_core_clk_rate == max_core_clk_rate)
-		ret = cx_ipeak_update(phandle->dss_cx_ipeak, false);
-
-	if (ret)
-		SDE_EVT32(ret, enable_vote, requested_clk_rate,
-					curr_core_clk_rate, prev_core_clk_rate);
-
-	return ret;
-}
-
 int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
 	u64 rate)
 {
 	int i, rc = -EINVAL;
 	struct dss_module_power *mp;
-	u64 prev_clk_rate, requested_clk_rate;
 
 	if (!phandle) {
 		pr_err("invalid input power handle\n");
@@ -863,15 +766,8 @@ int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
 					(rate > mp->clk_config[i].max_rate))
 				rate = mp->clk_config[i].max_rate;
 
-			prev_clk_rate = mp->clk_config[i].rate;
-			requested_clk_rate = rate;
-			sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
-				requested_clk_rate, prev_clk_rate, true);
 			mp->clk_config[i].rate = rate;
 			rc = msm_dss_single_clk_set_rate(&mp->clk_config[i]);
-			if (!rc)
-				sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
-				   requested_clk_rate, prev_clk_rate, false);
 			break;
 		}
 	}
@@ -947,30 +843,6 @@ struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
 	return clk;
 }
 
-int sde_power_clk_set_flags(struct sde_power_handle *phandle,
-		char *clock_name, unsigned long flags)
-{
-	struct clk *clk;
-
-	if (!phandle) {
-		pr_err("invalid input power handle\n");
-		return -EINVAL;
-	}
-
-	if (!clock_name) {
-		pr_err("invalid input clock name\n");
-		return -EINVAL;
-	}
-
-	clk = sde_power_clk_get_clk(phandle, clock_name);
-	if (!clk) {
-		pr_err("get_clk failed for clk: %s\n", clock_name);
-		return -EINVAL;
-	}
-
-	return clk_set_flags(clk, flags);
-}
-
 struct sde_power_event *sde_power_handle_register_event(
 		struct sde_power_handle *phandle,
 		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),

+ 35 - 32
msm/sde_power_handle.h

@@ -18,7 +18,7 @@
 #define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	3000000000ULL
 
 #include <linux/sde_io_util.h>
-#include <soc/qcom/cx_ipeak.h>
+#include <linux/interconnect.h>
 
 /* event will be triggered before power handler disable */
 #define SDE_POWER_EVENT_PRE_DISABLE	0x1
@@ -31,6 +31,23 @@
 
 /* event will be triggered after power handler enable */
 #define SDE_POWER_EVENT_POST_ENABLE	0x8
+#define DATA_BUS_PATH_MAX	0x2
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC            0
+#define QCOM_ICC_BUCKET_WAKE           1
+#define QCOM_ICC_BUCKET_SLEEP          2
+#define QCOM_ICC_NUM_BUCKETS           3
+#define QCOM_ICC_TAG_AMC               BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE              BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP             BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY       (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS            (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+                                        QCOM_ICC_TAG_SLEEP)
 
 /**
  * mdss_bus_vote_type: register bus vote type
@@ -73,30 +90,27 @@ enum SDE_POWER_HANDLE_DBUS_ID {
 	SDE_POWER_HANDLE_DBUS_ID_MAX,
 };
 
+/**
+ * struct sde_power_bus_scaling_data: struct for bus setting
+ * @ab: average bandwidth in kilobytes per second
+ * @ib: peak bandwidth in kilobytes per second
+ */
+struct sde_power_bus_scaling_data {
+	uint64_t ab; /* Arbitrated bandwidth */
+	uint64_t ib; /* Instantaneous bandwidth */
+};
+
 /**
  * struct sde_power_data_handle: power handle struct for data bus
- * @data_bus_scale_table: pointer to bus scaling table
  * @data_bus_hdl: current data bus handle
+ * @curr_val: current data bus vote values
  * @data_paths_cnt: number of rt data path ports
- * @curr_bw_uc_idx: current use case index of data bus
- * @ao_bw_uc_idx: active only use case index of data bus
- * @ab_rt: realtime ab quota
- * @ib_rt: realtime ib quota
- * @ab_nrt: non-realtime ab quota
- * @ib_nrt: non-realtime ib quota
- * @enable: true if bus is enabled
  */
 struct sde_power_data_bus_handle {
-	struct msm_bus_scale_pdata *data_bus_scale_table;
-	u32 data_bus_hdl;
+	struct icc_path *data_bus_hdl[DATA_BUS_PATH_MAX];
+	struct sde_power_bus_scaling_data curr_val;
 	u32 data_paths_cnt;
-	u32 curr_bw_uc_idx;
-	u32 ao_bw_uc_idx;
-	u64 ab_rt;
-	u64 ib_rt;
-	u64 ab_nrt;
-	u64 ib_nrt;
-	bool enable;
+	bool bus_active_only;
 };
 
 /*
@@ -124,24 +138,24 @@ struct sde_power_event {
  * @dev: pointer to device structure
  * @usecase_ndx: current usecase index
  * @reg_bus_hdl: current register bus handle
+ * @reg_bus_curr_val: current reg bus vote value
  * @data_bus_handle: context structure for data bus control
  * @event_list: current power handle event list
  * @rsc_client: sde rsc client pointer
  * @rsc_client_init: boolean to control rsc client create
- * @dss_cx_ipeak: client pointer for cx ipeak driver
  */
 struct sde_power_handle {
 	struct dss_module_power mp;
 	struct mutex phandle_lock;
 	struct device *dev;
 	u32 current_usecase_ndx;
-	u32 reg_bus_hdl;
+	struct icc_path *reg_bus_hdl;
+	struct sde_power_bus_scaling_data reg_bus_curr_val;
 	struct sde_power_data_bus_handle data_bus_handle
 		[SDE_POWER_HANDLE_DBUS_ID_MAX];
 	struct list_head event_list;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_client_init;
-	struct cx_ipeak_client *dss_cx_ipeak;
 };
 
 /**
@@ -235,17 +249,6 @@ u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata,
 struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
 		char *clock_name);
 
-/**
- * sde_power_clk_set_flags() - set the clock flags
- * @pdata:  power handle containing the resources
- * @clock_name: clock name to get the clk pointer.
- * @flags: flags to set
- *
- * Return: error code.
- */
-int sde_power_clk_set_flags(struct sde_power_handle *pdata,
-		char *clock_name, unsigned long flags);
-
 /**
  * sde_power_data_bus_set_quota() - set data bus quota for power client
  * @phandle:  power handle containing the resources

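For reference, a minimal sketch of the interconnect voting flow that replaces the msm-bus client in this handle (assumes the standard Linux interconnect API; the path name "qcom,sde-data-bus0" and the bandwidth numbers are illustrative, not taken from this change):

	#include <linux/interconnect.h>

	static int example_data_bus_vote(struct device *dev)
	{
		struct icc_path *path;
		int rc;

		path = of_icc_get(dev, "qcom,sde-data-bus0");
		if (IS_ERR_OR_NULL(path))
			return path ? PTR_ERR(path) : -EINVAL;

		/* apply the vote only while the subsystem is active */
		icc_set_tag(path, QCOM_ICC_TAG_ACTIVE_ONLY);

		/* ab/ib are in kilobytes per second, as documented above */
		rc = icc_set_bw(path, 0, 76800);
		if (rc)
			icc_put(path);

		return rc;
	}
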
+ 32 - 5
msm/sde_rsc.c

@@ -15,7 +15,6 @@
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 #include <linux/module.h>
-#include <linux/msm-bus.h>
 
 #include <soc/qcom/rpmh.h>
 #include <drm/drmP.h>
@@ -64,6 +63,21 @@
 static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
 static struct device *rpmh_dev[MAX_RSC_COUNT];
 
+static void sde_rsc_set_data_bus_mode(struct sde_power_handle *phandle, u32 tag)
+{
+	int i = 0, j = 0;
+
+	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) {
+		if (!phandle->data_bus_handle[i].bus_active_only)
+			continue;
+
+		for (j = 0; j < phandle->data_bus_handle[i].data_paths_cnt; j++)
+			icc_set_tag(phandle->data_bus_handle[i].data_bus_hdl[j],
+				    tag);
+
+	}
+}
+
 /**
  * sde_rsc_client_create() - create the client for sde rsc.
  * Different displays like DSI, HDMI, DP, WB, etc should call this
@@ -523,8 +537,11 @@ static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
-		if (!rc)
+		if (!rc) {
 			rpmh_mode_solver_set(rsc->rpmh_dev, true);
+			sde_rsc_set_data_bus_mode(&rsc->phandle,
+						  QCOM_ICC_TAG_WAKE);
+		}
 	}
 
 vsync_wait:
@@ -574,8 +591,11 @@ static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
-		if (!rc)
+		if (!rc) {
 			rpmh_mode_solver_set(rsc->rpmh_dev, false);
+			sde_rsc_set_data_bus_mode(&rsc->phandle,
+						  QCOM_ICC_TAG_AMC);
+		}
 	}
 
 	/* indicate wait for vsync for cmd/vid to clk state switch */
@@ -661,9 +681,13 @@ static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 
 	if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_VID_STATE);
-		if (!rc)
+		if (!rc) {
 			rpmh_mode_solver_set(rsc->rpmh_dev,
 				rsc->version == SDE_RSC_REV_3 ? true : false);
+			sde_rsc_set_data_bus_mode(&rsc->phandle,
+				rsc->version == SDE_RSC_REV_3 ?
+				QCOM_ICC_TAG_WAKE : QCOM_ICC_TAG_AMC);
+		}
 	}
 
 vsync_wait:
@@ -737,8 +761,11 @@ static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc,
 			rc = CLK_MODE_SWITCH_SUCCESS;
 	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
-		if (!rc)
+		if (!rc) {
 			rpmh_mode_solver_set(rsc->rpmh_dev, true);
+			sde_rsc_set_data_bus_mode(&rsc->phandle,
+						  QCOM_ICC_TAG_WAKE);
+		}
 	}
 
 	return rc;

+ 0 - 1
pll/pll_util.c

@@ -346,7 +346,6 @@ pnode_err:
 	if (pnode)
 		of_node_put(pnode);
 
-	dma_release_declared_memory(&pdev->dev);
 	return rc;
 }
 

+ 42 - 64
rotator/sde_rotator_base.c

@@ -14,8 +14,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
 #include <linux/regulator/consumer.h>
 
 #define CREATE_TRACE_POINTS
@@ -26,6 +24,13 @@
 #include "sde_rotator_dev.h"
 #include "sde_rotator_vbif.h"
 
+static const struct sde_rot_bus_data sde_rot_reg_bus_table[] = {
+	{0, 0},
+	{0, 76800},
+	{0, 150000},
+	{0, 300000},
+};
+
 static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
 {
 	u64 result = (val * (u64)numer);
@@ -51,6 +56,11 @@ static inline bool validate_comp_ratio(struct sde_mult_factor *factor)
 	return factor->numer && factor->denom;
 }
 
+const struct sde_rot_bus_data *sde_get_rot_reg_bus_value(u32 usecase_ndx)
+{
+	return &sde_rot_reg_bus_table[usecase_ndx];
+}
+
 u32 sde_apply_comp_ratio_factor(u32 quota,
 	struct sde_mdp_format_params *fmt,
 	struct sde_mult_factor *factor)
@@ -471,6 +481,7 @@ int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
 	int ret = 0;
 	bool changed = false;
 	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
+	const struct sde_rot_bus_data *reg_bus_value = NULL;
 	struct reg_bus_client *client, *temp_client;
 	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
 
@@ -487,19 +498,27 @@ int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
 			max_usecase_ndx = client->usecase_ndx;
 	}
 
-	if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx) {
+	if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx)
 		changed = true;
-		sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
-	}
 
 	SDEROT_DBG(
 		"%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
 		__builtin_return_address(0), changed, max_usecase_ndx,
 		bus_client->name, bus_client->id, usecase_ndx);
-	if (changed)
-		ret = msm_bus_scale_client_update_request(sde_res->reg_bus_hdl,
-			max_usecase_ndx);
+	if (changed) {
+		reg_bus_value = sde_get_rot_reg_bus_value(max_usecase_ndx);
+		ret = icc_set_bw(sde_res->reg_bus_hdl, reg_bus_value->ab,
+			reg_bus_value->ib);
+	}
 
+	if (ret) {
+		pr_err("rotator: reg_bus_hdl set failed ab=%llu, ib=%llu\n",
+		       reg_bus_value->ab, reg_bus_value->ib);
+		if (sde_res->reg_bus_usecase_ndx == VOTE_INDEX_DISABLE)
+			pr_err("rotator: reg_bus_hdl was disabled\n");
+	} else {
+		sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
+	}
 	mutex_unlock(&sde_res->reg_bus_lock);
 	return ret;
 }
@@ -820,74 +839,33 @@ static void sde_mdp_destroy_dt_misc(struct platform_device *pdev,
 	mdata->vbif_nrt_qos = NULL;
 }
 
-#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
-	{						\
-		.src = MSM_BUS_MASTER_AMPSS_M0,		\
-		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
-		.ab = (ab_val),				\
-		.ib = (ib_val),				\
-	}
-
-#define BUS_VOTE_19_MHZ 153600000
-#define BUS_VOTE_40_MHZ 320000000
-#define BUS_VOTE_80_MHZ 640000000
-
-#ifdef CONFIG_QCOM_BUS_SCALING
-
-static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
-	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
-	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
-};
-static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
-		mdp_reg_bus_vectors)];
-static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
-	.usecase = mdp_reg_bus_usecases,
-	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
-	.name = "sde_reg",
-	.active_only = true,
-};
-
 static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
 {
-	struct msm_bus_scale_pdata *reg_bus_pdata;
-	int i;
+	int rc = 0;
 
-	if (!mdata->reg_bus_hdl) {
-		reg_bus_pdata = &mdp_reg_bus_scale_table;
-		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
-			mdp_reg_bus_usecases[i].num_paths = 1;
-			mdp_reg_bus_usecases[i].vectors =
-				&mdp_reg_bus_vectors[i];
-		}
+	mdata->reg_bus_hdl = of_icc_get(&mdata->pdev->dev, "qcom,sde-reg-bus");
 
-		mdata->reg_bus_hdl =
-			msm_bus_scale_register_client(reg_bus_pdata);
-		if (!mdata->reg_bus_hdl) {
-			/* Continue without reg_bus scaling */
-			SDEROT_WARN("reg_bus_client register failed\n");
-		} else
-			SDEROT_DBG("register reg_bus_hdl=%x\n",
-					mdata->reg_bus_hdl);
+	if (mdata->reg_bus_hdl == NULL) {
+		pr_err("rotator: reg bus dt node missing\n");
+		return 0;
+	} else if (IS_ERR(mdata->reg_bus_hdl)) {
+		SDEROT_ERR("reg bus handle parsing failed\n");
+		mdata->reg_bus_hdl = NULL;
+		rc = -EINVAL;
+	} else {
+		SDEROT_DBG("rotator reg_bus_hdl parsing success\n");
 	}
 
-	return 0;
-}
-#else
-static inline int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
-{
-	return 0;
+	return rc;
 }
-#endif
 
 static void sde_mdp_bus_scale_unregister(struct sde_rot_data_type *mdata)
 {
-	SDEROT_DBG("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
+	SDEROT_DBG("unregister reg_bus_hdl\n");
 
 	if (mdata->reg_bus_hdl) {
-		msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
-		mdata->reg_bus_hdl = 0;
+		icc_put(mdata->reg_bus_hdl);
+		mdata->reg_bus_hdl = NULL;
 	}
 }
 

+ 17 - 4
rotator/sde_rotator_base.h

@@ -13,6 +13,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/interconnect.h>
 
 #include "sde_rotator_hwio.h"
 #include "sde_rotator_io_util.h"
@@ -45,6 +46,7 @@
 #define SDE_MDP_HW_REV_530	SDE_MDP_REV(5, 3, 0)	/* sm6150 v1.0 */
 #define SDE_MDP_HW_REV_540	SDE_MDP_REV(5, 4, 0)	/* sdmtrinket v1.0 */
 #define SDE_MDP_HW_REV_600	SDE_MDP_REV(6, 0, 0)    /* msmnile+ v1.0 */
+#define SDE_MDP_HW_REV_630	SDE_MDP_REV(6, 3, 0)	/* bengal v1.0 */
 
 #define SDE_MDP_VBIF_4_LEVEL_REMAPPER	4
 #define SDE_MDP_VBIF_8_LEVEL_REMAPPER	8
@@ -89,9 +91,9 @@ struct sde_mdp_vbif_halt_params {
 
 enum sde_bus_vote_type {
 	VOTE_INDEX_DISABLE,
-	VOTE_INDEX_19_MHZ,
-	VOTE_INDEX_40_MHZ,
-	VOTE_INDEX_80_MHZ,
+	VOTE_INDEX_76_MHZ,
+	VOTE_INDEX_150_MHZ,
+	VOTE_INDEX_300_MHZ,
 	VOTE_INDEX_MAX,
 };
 
@@ -191,6 +193,16 @@ struct sde_smmu_client {
 	u32 sid;
 };
 
+/*
+ * struct sde_rot_bus_data: struct for bus setting
+ * @ab: average bandwidth in kilobytes per second
+ * @ib: peak bandwidth in kilobytes per second
+ */
+struct sde_rot_bus_data {
+	uint64_t ab; /* Arbitrated bandwidth */
+	uint64_t ib; /* Instantaneous bandwidth */
+};
+
 /*
  * struct sde_rot_debug_bus: rotator debugbus header structure
  * @wr_addr: write address for debugbus controller
@@ -250,7 +262,7 @@ struct sde_rot_data_type {
 	u32 rot_block_size;
 
 	/* register bus (AHB) */
-	u32 reg_bus_hdl;
+	struct icc_path *reg_bus_hdl;
 	u32 reg_bus_usecase_ndx;
 	struct list_head reg_bus_clist;
 	struct mutex reg_bus_lock;
@@ -319,6 +331,7 @@ void vbif_unlock(struct platform_device *parent_pdev);
 void sde_mdp_halt_vbif_xin(struct sde_mdp_vbif_halt_params *params);
 
 int sde_mdp_init_vbif(void);
+const struct sde_rot_bus_data *sde_get_rot_reg_bus_value(u32 usecase_ndx);
 
 #define SDE_VBIF_WRITE(mdata, offset, value) \
 		(sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))

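A small sketch of how the new table-based AHB (reg bus) vote is expected to be issued (illustrative only; pdev is an assumed platform device and the "qcom,sde-reg-bus" interconnect name follows the pattern used elsewhere in this change):

	const struct sde_rot_bus_data *vote;
	struct icc_path *reg_bus;
	int rc;

	reg_bus = of_icc_get(&pdev->dev, "qcom,sde-reg-bus");
	if (IS_ERR_OR_NULL(reg_bus))
		return -EINVAL;

	/* map the usecase index to an {ab, ib} pair in kBps and apply it */
	vote = sde_get_rot_reg_bus_value(VOTE_INDEX_76_MHZ);
	rc = icc_set_bw(reg_bus, vote->ab, vote->ib);
	if (rc)
		icc_put(reg_bus);
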
+ 115 - 187
rotator/sde_rotator_core.c

@@ -13,8 +13,6 @@
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/debugfs.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
 #include <linux/regulator/consumer.h>
 #include <linux/dma-direction.h>
 #include <soc/qcom/scm.h>
@@ -69,50 +67,20 @@
  */
 #define ROT_MAX_HW_BLOCKS 2
 
-#define SDE_REG_BUS_VECTOR_ENTRY(ab_val, ib_val)	\
-	{						\
-		.src = MSM_BUS_MASTER_AMPSS_M0,		\
-		.dst = MSM_BUS_SLAVE_DISPLAY_CFG,	\
-		.ab = (ab_val),				\
-		.ib = (ib_val),				\
-	}
-
 #define BUS_VOTE_19_MHZ 153600000
 
 /* forward prototype */
 static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
 
-#ifdef CONFIG_QCOM_BUS_SCALING
-static struct msm_bus_vectors rot_reg_bus_vectors[] = {
-	SDE_REG_BUS_VECTOR_ENTRY(0, 0),
-	SDE_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
-};
-static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
-		rot_reg_bus_vectors)];
-static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
-	.usecase = rot_reg_bus_usecases,
-	.num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
-	.name = "mdss_rot_reg",
-	.active_only = 1,
-};
-
 static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
 		u64 quota)
 {
-	int new_uc_idx;
-	int ret;
+	int ret = 0, i = 0, j = 0;
+	u64 ab = 0;
 
-	if (!bus) {
-		SDEROT_ERR("null parameter\n");
-		return -EINVAL;
-	}
-
-	if (!bus->bus_hdl) {
-		SDEROT_DBG("bus scaling not enabled\n");
+	if (!bus || !bus->data_paths_cnt) {
+		SDEROT_DBG("bus scaling not registered\n");
 		return 0;
-	} else if (bus->bus_hdl < 0) {
-		SDEROT_ERR("invalid bus handle %d\n", bus->bus_hdl);
-		return -EINVAL;
 	}
 
 	if (bus->curr_quota_val == quota) {
@@ -120,46 +88,31 @@ static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
 		return 0;
 	}
 
-	if (!bus->bus_scale_pdata || !bus->bus_scale_pdata->num_usecases) {
-		SDEROT_ERR("invalid bus scale data\n");
-		return -EINVAL;
-	}
-
-	if (!quota) {
-		new_uc_idx = 0;
-	} else {
-		struct msm_bus_vectors *vect = NULL;
-		struct msm_bus_scale_pdata *bw_table =
-			bus->bus_scale_pdata;
-		u64 port_quota = quota;
-		u32 total_axi_port_cnt;
-		int i;
-
-		new_uc_idx = (bus->curr_bw_uc_idx %
-			(bw_table->num_usecases - 1)) + 1;
-
-		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
-		if (total_axi_port_cnt == 0) {
-			SDEROT_ERR("Number of bw paths is 0\n");
-			return -ENODEV;
-		}
-		do_div(port_quota, total_axi_port_cnt);
+	SDEROT_EVTLOG(quota);
+	SDEROT_DBG("quota=%llu\n", quota);
+	ATRACE_BEGIN("msm_bus_scale_req_rot");
+	ab = div_u64(quota, bus->data_paths_cnt);
 
-		for (i = 0; i < total_axi_port_cnt; i++) {
-			vect = &bw_table->usecase[new_uc_idx].vectors[i];
-			vect->ab = port_quota;
-			vect->ib = 0;
+	for (i = 0; i < bus->data_paths_cnt; i++) {
+		if (bus->data_bus_hdl[i]) {
+			ret = icc_set_bw(bus->data_bus_hdl[i], ab, ab);
+			if (ret)
+				goto err;
 		}
 	}
-	bus->curr_bw_uc_idx = new_uc_idx;
+
+	ATRACE_END("msm_bus_scale_req_rot");
 	bus->curr_quota_val = quota;
 
-	SDEROT_EVTLOG(new_uc_idx, quota);
-	SDEROT_DBG("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
-	ATRACE_BEGIN("msm_bus_scale_req_rot");
-	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
-		new_uc_idx);
+	return 0;
+err:
+	ab = div_u64(bus->curr_quota_val, bus->data_paths_cnt);
+	for (j = 0; j < i; j++)
+		icc_set_bw(bus->data_bus_hdl[j], ab, ab);
 	ATRACE_END("msm_bus_scale_req_rot");
+	pr_err("failed to set data bus quota %llu\n", quota);
+	if (!bus->curr_quota_val)
+		pr_err("rotator: data bus was set to 0\n");
 
 	return ret;
 }
@@ -168,43 +121,40 @@ static int sde_rotator_enable_reg_bus(struct sde_rot_mgr *mgr, u64 quota)
 {
 	int ret = 0, changed = 0;
 	u32 usecase_ndx = 0;
+	const struct sde_rot_bus_data *reg_bus_value = NULL;
 
-	if (!mgr || !mgr->reg_bus.bus_hdl)
+	if (!mgr || !mgr->reg_bus.data_paths_cnt)
 		return 0;
 
 	if (quota)
-		usecase_ndx = 1;
+		usecase_ndx = VOTE_INDEX_76_MHZ;
 
-	if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
-		mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
+	if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx)
 		changed++;
-	}
 
 	SDEROT_DBG("%s, changed=%d register bus %s\n", __func__, changed,
 		quota ? "Enable":"Disable");
 
 	if (changed) {
 		ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
-		ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
-			usecase_ndx);
+
+		reg_bus_value = sde_get_rot_reg_bus_value(usecase_ndx);
+		ret = icc_set_bw(mgr->reg_bus.data_bus_hdl[0],
+			reg_bus_value->ab, reg_bus_value->ib);
 		ATRACE_END("msm_bus_scale_req_rot_reg");
+
+	}
+	if (ret) {
+		pr_err("rotator: set reg bus failed ab=%llu, ib=%llu\n",
+		       reg_bus_value->ab, reg_bus_value->ib);
+		if (mgr->reg_bus.curr_bw_uc_idx == VOTE_INDEX_DISABLE)
+			pr_err("rotator: reg bus was disabled\n");
+	} else {
+		mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
 	}
 
 	return ret;
 }
-#else
-static inline int sde_rotator_enable_reg_bus(struct sde_rot_mgr *mgr, u64 quota)
-{
-	return 0;
-}
-
-static inline int sde_rotator_bus_scale_set_quota(
-		struct sde_rot_bus_data_type *bus, u64 quota)
-{
-	return 0;
-}
-#endif
-
 /*
  * Clock rate of all open sessions working a particular hw block
  * are added together to get the required rate for that hw block.
@@ -375,6 +325,7 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
 {
 	int ret = 0;
 	int changed = 0;
+	int i = 0, bus_cnt = 0;
 
 	if (enable) {
 		if (mgr->rot_enable_clk_cnt == 0)
@@ -425,9 +376,15 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
 				goto error_rot_sub;
 
 			/* Active+Sleep */
-			msm_bus_scale_client_update_context(
-				mgr->data_bus.bus_hdl, false,
-				mgr->data_bus.curr_bw_uc_idx);
+			if (mgr->data_bus.bus_active_only) {
+				bus_cnt = mgr->data_bus.data_paths_cnt;
+				for (i = 0; i < bus_cnt; i++) {
+					icc_set_tag(
+						mgr->data_bus.data_bus_hdl[i],
+						(QCOM_ICC_TAG_ACTIVE_ONLY |
+						 QCOM_ICC_TAG_SLEEP));
+				}
+			}
 			trace_rot_bw_ao_as_context(0);
 		} else {
 			sde_rotator_disable_clk(mgr,
@@ -440,9 +397,15 @@ int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
 			sde_rotator_disable_clk(mgr, SDE_ROTATOR_CLK_MNOC_AHB);
 
 			/* Active Only */
-			msm_bus_scale_client_update_context(
-				mgr->data_bus.bus_hdl, true,
-				mgr->data_bus.curr_bw_uc_idx);
+			if (mgr->data_bus.bus_active_only) {
+				bus_cnt = mgr->data_bus.data_paths_cnt;
+				for (i = 0; i < bus_cnt; i++) {
+					icc_set_tag(
+						mgr->data_bus.data_bus_hdl[i],
+						QCOM_ICC_TAG_ACTIVE_ONLY);
+				}
+			}
+
 			trace_rot_bw_ao_as_context(1);
 		}
 	}
@@ -2766,56 +2729,67 @@ static struct attribute_group sde_rotator_fs_attr_group = {
 	.attrs = sde_rotator_fs_attrs
 };
 
-#ifdef CONFIG_QCOM_BUS_SCALING
 static int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
 	struct platform_device *dev)
 {
-	int ret = 0, i;
-	int usecases;
-	struct device_node *node;
+	char bus_name[32];
+	int ret = 0, i = 0;
 
-	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
-	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
-		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
-		if (ret) {
-			SDEROT_ERR("msm_bus_cl_get_pdata failed. ret=%d\n",
-					ret);
-			mgr->data_bus.bus_scale_pdata = NULL;
-		}
+	mgr->reg_bus.data_bus_hdl[0] = of_icc_get(&dev->dev,
+						  "qcom,sde-reg-bus");
+
+	if (mgr->reg_bus.data_bus_hdl[0] == NULL) {
+		mgr->reg_bus.data_paths_cnt = 0;
+		pr_debug("rotator: reg bus dt node missing\n");
+		goto data_bus;
+	} else if (IS_ERR(mgr->reg_bus.data_bus_hdl[0])) {
+		SDEROT_ERR("sde rotator parse reg bus failed. ret=%d\n",
+			   ret);
+		mgr->reg_bus.data_bus_hdl[0] = NULL;
+		ret = -EINVAL;
+		return ret;
 	}
+	mgr->reg_bus.data_paths_cnt = 1;
+
+data_bus:
+	for (i = 0; i < SDE_ROTATION_BUS_PATH_MAX; i++) {
+		snprintf(bus_name, 32, "%s%d", "qcom,rot-data-bus", i);
+		ret = of_property_match_string(dev->dev.of_node,
+			"interconnect-names", bus_name);
+		if (ret < 0) {
+			if (!mgr->data_bus.data_paths_cnt) {
+				pr_debug("rotator: bus %s dt node missing\n", bus_name);
+				return 0;
+			} else
+				goto end;
+		} else
+			mgr->data_bus.data_bus_hdl[i] = of_icc_get(&dev->dev, bus_name);
 
-	node = of_get_child_by_name(dev->dev.of_node, "qcom,rot-reg-bus");
-	if (node) {
-		mgr->reg_bus.bus_scale_pdata
-				= msm_bus_pdata_from_node(dev, node);
-		if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
-			SDEROT_ERR("reg bus pdata parsing failed\n");
-			ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
-			if (!mgr->reg_bus.bus_scale_pdata)
-				ret = -EINVAL;
-			mgr->reg_bus.bus_scale_pdata = NULL;
+		if (IS_ERR_OR_NULL(mgr->data_bus.data_bus_hdl[i])) {
+			SDEROT_ERR("rotator: get data bus %s failed\n",
+				   bus_name);
+			break;
 		}
-	} else {
-		SDEROT_DBG(
-			"no DT entries, configuring default reg bus table\n");
-		mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
-		usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
-		for (i = 0; i < usecases; i++) {
-			rot_reg_bus_usecases[i].num_paths = 1;
-			rot_reg_bus_usecases[i].vectors =
-				&rot_reg_bus_vectors[i];
+		mgr->data_bus.data_paths_cnt++;
+	}
+
+	if (!mgr->data_bus.data_paths_cnt) {
+		pr_err("rotator: no data bus path found\n");
+		return -EINVAL;
+	}
+
+end:
+	if (of_find_property(dev->dev.of_node,
+			     "qcom,msm-bus,active-only", NULL)) {
+		mgr->data_bus.bus_active_only = true;
+		for (i = 0; i < mgr->data_bus.data_paths_cnt; i++) {
+			icc_set_tag(mgr->data_bus.data_bus_hdl[i],
+				    QCOM_ICC_TAG_ACTIVE_ONLY);
 		}
 	}
 
-	return ret;
-}
-#else
-static inline int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
-	struct platform_device *dev)
-{
 	return 0;
 }
-#endif
 
 static int sde_rotator_parse_dt(struct sde_rot_mgr *mgr,
 	struct platform_device *dev)
@@ -2920,59 +2894,19 @@ error:
 	return rc;
 }
 
-#ifdef CONFIG_QCOM_BUS_SCALING
 static void sde_rotator_bus_scale_unregister(struct sde_rot_mgr *mgr)
 {
-	SDEROT_DBG("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
-		mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl);
-
-	if (mgr->data_bus.bus_hdl)
-		msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl);
-
-	if (mgr->reg_bus.bus_hdl)
-		msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl);
-}
-
-static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
-{
-	if (!mgr->data_bus.bus_scale_pdata) {
-		SDEROT_DBG("Bus scaling is not enabled\n");
-		return 0;
-	}
+	int i = 0;
 
-	mgr->data_bus.bus_hdl =
-		msm_bus_scale_register_client(
-		mgr->data_bus.bus_scale_pdata);
-	if (!mgr->data_bus.bus_hdl) {
-		SDEROT_ERR("bus_client register failed\n");
-		return -EINVAL;
+	SDEROT_DBG("unregister sde rotator bus\n");
+	for (i = 0; i < mgr->data_bus.data_paths_cnt; i++) {
+		if (mgr->data_bus.data_bus_hdl[i])
+			icc_put(mgr->data_bus.data_bus_hdl[i]);
 	}
-	SDEROT_DBG("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
 
-	if (mgr->reg_bus.bus_scale_pdata) {
-		mgr->reg_bus.bus_hdl =
-			msm_bus_scale_register_client(
-			mgr->reg_bus.bus_scale_pdata);
-		if (!mgr->reg_bus.bus_hdl) {
-			SDEROT_ERR("register bus_client register failed\n");
-			sde_rotator_bus_scale_unregister(mgr);
-		} else {
-			SDEROT_DBG("registered register bus_hdl=%x\n",
-					mgr->reg_bus.bus_hdl);
-		}
-	}
-
-	return 0;
+	if (mgr->reg_bus.data_bus_hdl[0])
+		icc_put(mgr->reg_bus.data_bus_hdl[0]);
 }
-#else
-static inline void sde_rotator_bus_scale_unregister(struct sde_rot_mgr *mgr)
-{
-}
-static inline int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
-{
-	return 0;
-}
-#endif
 
 static inline int sde_rotator_search_dt_clk(struct platform_device *pdev,
 		struct sde_rot_mgr *mgr, char *clk_name, int clk_idx,
@@ -3092,10 +3026,6 @@ static int sde_rotator_res_init(struct platform_device *pdev,
 	if (ret)
 		goto error;
 
-	ret = sde_rotator_bus_scale_register(mgr);
-	if (ret)
-		goto error;
-
 	return 0;
 error:
 	sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
@@ -3228,8 +3158,6 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 		goto error_hw_init;
 	}
 
-	sde_rotator_pm_qos_add(mdata);
-
 	ret = sde_rotator_init_queue(mgr);
 	if (ret) {
 		SDEROT_ERR("fail to init queue\n");

+ 27 - 9
rotator/sde_rotator_core.h

@@ -60,6 +60,23 @@
 
 /* use client provided clock/bandwidth parameters */
 #define SDE_ROTATION_EXT_PERF		0x100000
+#define SDE_ROTATION_BUS_PATH_MAX	0x2
+
+/*
+ * The AMC bucket denotes constraints that are applied to hardware when
+ * icc_set_bw() completes, whereas the WAKE and SLEEP constraints are applied
+ * when the execution environment transitions between active and low power mode.
+ */
+#define QCOM_ICC_BUCKET_AMC            0
+#define QCOM_ICC_BUCKET_WAKE           1
+#define QCOM_ICC_BUCKET_SLEEP          2
+#define QCOM_ICC_NUM_BUCKETS           3
+#define QCOM_ICC_TAG_AMC               BIT(QCOM_ICC_BUCKET_AMC)
+#define QCOM_ICC_TAG_WAKE              BIT(QCOM_ICC_BUCKET_WAKE)
+#define QCOM_ICC_TAG_SLEEP             BIT(QCOM_ICC_BUCKET_SLEEP)
+#define QCOM_ICC_TAG_ACTIVE_ONLY       (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE)
+#define QCOM_ICC_TAG_ALWAYS            (QCOM_ICC_TAG_AMC | QCOM_ICC_TAG_WAKE |\
+                                        QCOM_ICC_TAG_SLEEP)
 
 /**********************************************************************
  * configuration structures
@@ -374,18 +391,19 @@ struct sde_rot_file_private {
 	struct sde_rot_queue_v1 *fenceq;
 };
 
-/*
- * struct sde_rot_bus_data_type - rotator bus scaling configuration
- * @bus_cale_pdata: pointer to bus scaling configuration table
- * @bus_hdl: msm bus scaling handle
- * @curr_bw_uc_idx; current usecase index into configuration table
- * @curr_quota_val: current bandwidth request in byte per second
+/**
+ * struct sde_rot_bus_data_type: rotator bus scaling handle
+ * @data_bus_hdl: interconnect path handles for the data bus
+ * @data_paths_cnt: number of rt data path ports
+ * @curr_quota_val: current bandwidth request in bytes per second
+ * @curr_bw_uc_idx: current reg bus value index
+ * @bus_active_only: true if the bus path supports AMC WAKE/SLEEP tagging
  */
 struct sde_rot_bus_data_type {
-	struct msm_bus_scale_pdata *bus_scale_pdata;
-	u32 bus_hdl;
-	u32 curr_bw_uc_idx;
+	struct icc_path *data_bus_hdl[SDE_ROTATION_BUS_PATH_MAX];
+	u32 data_paths_cnt;
 	u64 curr_quota_val;
+	u32 curr_bw_uc_idx;
+	bool bus_active_only;
 };
 
 /*

+ 0 - 105
rotator/sde_rotator_dev.c

@@ -55,8 +55,6 @@
 
 static void sde_rotator_submit_handler(struct kthread_work *work);
 static void sde_rotator_retire_handler(struct kthread_work *work);
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request);
 #ifdef CONFIG_COMPAT
 static long sde_rotator_compat_ioctl32(struct file *file,
 	unsigned int cmd, unsigned long arg);
@@ -1001,8 +999,6 @@ struct sde_rotator_ctx *sde_rotator_ctx_open(
 		SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
 
 	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-				 SDE_ROTATOR_ADD_REQUEST);
 	ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
 			ctx->session_id, &ctx->work_queue);
 	if (ret < 0) {
@@ -1127,8 +1123,6 @@ static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
 	}
 	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
 	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-			SDE_ROTATOR_REMOVE_REQUEST);
 	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
@@ -1243,104 +1237,6 @@ static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
 	return retire_delta >= 0;
 }
 
-static void sde_rotator_pm_qos_remove(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	pm_qos_remove_request(req);
-}
-
-void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	int cpu;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
-		PM_QOS_DEFAULT_VALUE);
-
-	SDEROT_DBG("rotator pmqos add mask %x latency %x\n",
-		rot_mdata->rot_pm_qos_cpu_mask,
-		rot_mdata->rot_pm_qos_cpu_dma_latency);
-}
-
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request)
-{
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	bool changed = false;
-
-	if (!rot_dev) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_dev->mdata->rot_pm_qos_cpu_mask;
-	cpu_dma_latency = rot_dev->mdata->rot_pm_qos_cpu_dma_latency;
-
-	if (!cpu_mask)
-		return;
-
-	if (add_request) {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-			changed = true;
-		rot_dev->mdata->rot_pm_qos_cpu_count++;
-	} else {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count != 0) {
-			rot_dev->mdata->rot_pm_qos_cpu_count--;
-			if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-				changed = true;
-		} else {
-			SDEROT_DBG("%s: ref_count is not balanced\n",
-				__func__);
-		}
-	}
-
-	if (!changed)
-		return;
-
-	SDEROT_EVTLOG(add_request, cpu_mask, cpu_dma_latency);
-
-	if (!add_request) {
-		pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-			PM_QOS_DEFAULT_VALUE);
-		return;
-	}
-
-	pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-		cpu_dma_latency);
-}
-
 /*
  * sde_rotator_inline_open - open inline rotator session
  * @pdev: Pointer to rotator platform device
@@ -3689,7 +3585,6 @@ static int sde_rotator_remove(struct platform_device *pdev)
 		return 0;
 	}
 
-	sde_rotator_pm_qos_remove(rot_dev->mdata);
 	for (i = MAX_ROT_OPEN_SESSION - 1; i >= 0; i--)
 		kthread_stop(rot_dev->rot_thread[i]);
 	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);

+ 0 - 1
rotator/sde_rotator_dev.h

@@ -12,7 +12,6 @@
 #include <linux/ktime.h>
 #include <linux/iommu.h>
 #include <linux/dma-buf.h>
-#include <linux/msm-bus.h>
 #include <linux/platform_device.h>
 #include <linux/soc/qcom/llcc-qcom.h>
 #include <linux/kthread.h>

+ 15 - 0
rotator/sde_rotator_r3.c

@@ -3271,6 +3271,21 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
 				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
 		rot->downscale_caps =
 			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
+				SDE_MDP_HW_REV_630)) {
+		SDEROT_DBG("Sys cache inline rotation not supported\n");
+		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
+		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
+				sde_hw_rotator_v4_inpixfmts;
+		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
+				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
+		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
+				sde_hw_rotator_v4_outpixfmts;
+		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
+				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
+		rot->downscale_caps =
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
 	} else {
 		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
 				sde_hw_rotator_v3_inpixfmts;

+ 1 - 1
rotator/sde_rotator_smmu.c

@@ -164,7 +164,7 @@ static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
 			goto end;
 		}
 		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
-			VOTE_INDEX_19_MHZ);
+			VOTE_INDEX_76_MHZ);
 		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
 		if (rc) {
 			SDEROT_ERR("clock enable failed - rc:%d\n", rc);

+ 0 - 2
rotator/sde_rotator_util.c

@@ -18,8 +18,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
 #include <linux/regulator/consumer.h>
 #include <media/msm_media_info.h>
 #include <linux/videodev2.h>