
Disp: Snapshot change for lahaina display driver

This snapshot change adds downstream support
for the drm 5.x+ (msm_lahaina branch) Linux kernel.

Change-Id: Ia691c95da155a00e449c91a2f1a5b20a8e71aed4
Signed-off-by: Narendra Muppalla <[email protected]>
Narendra Muppalla · 6 years ago · commit d1d9ae8b19

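The recurring theme in the hunks below is the drm 5.x+ connector atomic_check signature: the callback now receives the full struct drm_atomic_state and looks up its own connector state from it. A minimal sketch of that pattern, assuming a hypothetical connector driver (the function name is illustrative and not part of this change):

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>

/* Illustrative only: an atomic_check following the drm 5.x+ convention.
 * The old callback received a struct drm_connector_state directly; the
 * new one receives the whole atomic state and fetches the connector state.
 */
static int example_connector_atomic_check(struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	if (!new_conn_state)
		return -EINVAL;

	/* driver-specific validation of new_conn_state goes here */
	return 0;
}
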
+ 2 - 2
msm/dp/dp_drm.c

@@ -231,8 +231,8 @@ static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
 }
 
 static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dp_bridge *bridge;
 	struct dp_display *dp;

+ 5 - 7
msm/dp/dp_drm.h

@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 
 #include "msm_drv.h"
 #include "dp_display.h"
@@ -47,7 +46,7 @@ int dp_connector_config_hdr(struct drm_connector *connector,
  */
 int dp_connector_atomic_check(struct drm_connector *connector,
 	void *display,
-	struct drm_connector_state *c_state);
+	struct drm_atomic_state *state);
 
 /**
  * dp_connector_set_colorspace - callback to set new colorspace
@@ -184,15 +183,14 @@ static inline int dp_connector_config_hdr(struct drm_connector *connector,
 	return 0;
 }
 
-int dp_connector_atomic_check(struct drm_connector *connector,
-	void *display,
-	struct drm_connector_state *c_state)
+static inline int dp_connector_atomic_check(struct drm_connector *connector,
+		void *display, struct drm_atomic_state *state)
 {
 	return 0;
 }
 
-int dp_connector_set_colorspace(struct drm_connector *connector,
-	void *display)
+static inline int dp_connector_set_colorspace(struct drm_connector *connector,
+		void *display)
 {
 	return 0;
 }

+ 2 - 2
msm/dp/dp_mst_drm.c

@@ -1025,8 +1025,8 @@ static void dp_mst_bridge_post_disable(struct drm_bridge *drm_bridge)
 }
 
 static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dp_mst_bridge *bridge;
 	struct dp_display *dp;

+ 8 - 5
msm/dsi/dsi_display.c

@@ -1423,7 +1423,7 @@ static ssize_t debugfs_read_esd_check_mode(struct file *file,
 	struct drm_panel_esd_config *esd_config;
 	char *buf;
 	int rc = 0;
-	size_t len;
+	size_t len = 0;
 
 	if (!display)
 		return -ENODEV;
@@ -5382,12 +5382,15 @@ static enum drm_mode_status dsi_display_drm_ext_mode_valid(
 
 static int dsi_display_drm_ext_atomic_check(struct drm_connector *connector,
 		void *disp,
-		struct drm_connector_state *c_state)
+		struct drm_atomic_state *state)
 {
 	struct dsi_display *display = disp;
+	struct drm_connector_state *c_state;
+
+	c_state = drm_atomic_get_new_connector_state(state, connector);
 
 	return display->ext_conn->helper_private->atomic_check(
-			display->ext_conn, c_state);
+			display->ext_conn, state);
 }
 
 static int dsi_display_ext_get_info(struct drm_connector *connector,
@@ -5538,8 +5541,8 @@ static bool dsi_display_drm_ext_bridge_mode_fixup(
 
 static void dsi_display_drm_ext_bridge_mode_set(
 		struct drm_bridge *bridge,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
+		const struct drm_display_mode *mode,
+		const struct drm_display_mode *adjusted_mode)
 {
 	struct dsi_display_ext_bridge *ext_bridge;
 	struct drm_display_mode tmp;

+ 2 - 2
msm/dsi/dsi_drm.c

@@ -314,8 +314,8 @@ static void dsi_bridge_post_disable(struct drm_bridge *bridge)
 }
 
 static void dsi_bridge_mode_set(struct drm_bridge *bridge,
-				struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode)
+				const struct drm_display_mode *mode,
+				const struct drm_display_mode *adjusted_mode)
 {
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
 

+ 0 - 1
msm/dsi/dsi_drm.h

@@ -9,7 +9,6 @@
 #include <linux/types.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
 
 #include "msm_drv.h"
 

+ 10 - 3
msm/msm_atomic.c

@@ -21,6 +21,7 @@
 #include "msm_gem.h"
 #include "msm_kms.h"
 #include "sde_trace.h"
+#include <drm/drm_atomic_uapi.h>
 
 #define MULTIPLE_CONN_DETECTED(x) (x > 1)
 
@@ -512,7 +513,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 
 	obj = msm_framebuffer_bo(new_state->fb, 0);
 	msm_obj = to_msm_bo(obj);
-	fence = reservation_object_get_excl_rcu(msm_obj->resv);
+	fence = dma_resv_get_excl_rcu(msm_obj->resv);
 
 	drm_atomic_set_fence_for_plane(new_state, fence);
 
@@ -715,7 +716,7 @@ int msm_atomic_commit(struct drm_device *dev,
 				msm_framebuffer_bo(new_plane_state->fb, 0);
 			struct msm_gem_object *msm_obj = to_msm_bo(obj);
 			struct dma_fence *fence =
-				reservation_object_get_excl_rcu(msm_obj->resv);
+				dma_resv_get_excl_rcu(msm_obj->resv);
 
 			drm_atomic_set_fence_for_plane(new_plane_state, fence);
 		}
@@ -826,7 +827,13 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	msm_atomic_wait_for_commit_done(dev, state);
+	if (kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, state);
+	}
+
+	if (!state->legacy_cursor_update)
+		msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
 

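Several hunks above and below also track the kernel rename of struct reservation_object to struct dma_resv and its helpers. A hedged sketch of the plane-fence pattern used in msm_atomic.c, pulled out on its own and assuming the 5.x-era helper names shown in the diff (the wrapper function is hypothetical):

#include <linux/dma-resv.h>
#include <drm/drm_atomic_uapi.h>

/* Sketch: take the exclusive fence of a buffer's reservation object and
 * attach it to the new plane state so the atomic commit waits for any
 * pending writer before scanning the buffer out.
 */
static void example_set_plane_fence(struct drm_plane_state *new_state,
		struct dma_resv *resv)
{
	struct dma_fence *fence = dma_resv_get_excl_rcu(resv);

	drm_atomic_set_fence_for_plane(new_state, fence);
}
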
+ 22 - 12
msm/msm_drv.c

@@ -41,8 +41,10 @@
 #include <linux/kthread.h>
 #include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_kms.h"
 #include "msm_mmu.h"
 #include "sde_wb.h"
@@ -56,9 +58,11 @@
  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
  *           MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ *           GEM object's debug name
  */
 #define MSM_VERSION_MAJOR	1
-#define MSM_VERSION_MINOR	3
+#define MSM_VERSION_MINOR	4
 #define MSM_VERSION_PATCHLEVEL	0
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -349,6 +353,13 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_kms *kms = priv->kms;
 	int i;
 
+	/* We must cancel and cleanup any pending vblank enable/disable
+	 * work before drm_irq_uninstall() to avoid work re-enabling an
+	 * irq after uninstall has disabled it.
+	 */
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
@@ -377,15 +388,13 @@ static int msm_drm_uninit(struct device *dev)
 	if (fbdev && priv->fbdev)
 		msm_fbdev_free(ddev);
 #endif
+	drm_atomic_helper_shutdown(ddev);
 	drm_mode_config_cleanup(ddev);
 
 	pm_runtime_get_sync(dev);
 	drm_irq_uninstall(ddev);
 	pm_runtime_put_sync(dev);
 
-	flush_workqueue(priv->wq);
-	destroy_workqueue(priv->wq);
-
 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
@@ -702,9 +711,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	struct drm_crtc *crtc;
 
 	ddev = drm_dev_alloc(drv, dev);
-	if (!ddev) {
+	if (IS_ERR(ddev)) {
 		dev_err(dev, "failed to allocate drm_device\n");
-		return -ENOMEM;
+		return PTR_ERR(ddev);
 	}
 
 	drm_mode_config_init(ddev);
@@ -1089,7 +1098,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	BUG_ON(!kms);
-	return kms->funcs->irq_postinstall(kms);
+
+	if (kms->funcs->irq_postinstall)
+		return kms->funcs->irq_postinstall(kms);
+
+	return 0;
 }
 
 static void msm_irq_uninstall(struct drm_device *dev)
@@ -1135,7 +1148,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
 	}
 
 	return msm_gem_new_handle(dev, file, args->size,
-			args->flags, &args->handle);
+			args->flags, &args->handle, NULL);
 }
 
 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -1640,9 +1653,7 @@ static const struct file_operations fops = {
 };
 
 static struct drm_driver msm_driver = {
-	.driver_features    = DRIVER_HAVE_IRQ |
-				DRIVER_GEM |
-				DRIVER_PRIME |
+	.driver_features    = DRIVER_GEM |
 				DRIVER_RENDER |
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
@@ -1664,7 +1675,6 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = msm_gem_prime_import,
-	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,

+ 14 - 4
msm/msm_drv.h

@@ -34,13 +34,12 @@
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
 #include <linux/sde_io_util.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
 #include <linux/kthread.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/msm_drm.h>
@@ -736,6 +735,8 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		unsigned int flags);
@@ -821,8 +822,12 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -836,7 +841,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -851,7 +855,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle);
+		uint32_t size, uint32_t flags, uint32_t *handle, char *name);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 		uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -864,6 +868,10 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
+
+__printf(2, 3)
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
 int msm_gem_delayed_import(struct drm_gem_object *obj);
 
 void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
@@ -974,12 +982,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}

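The __printf() annotations added above let the compiler check format strings at call sites. A small sketch of the attribute on a variadic setter, mirroring the msm_rd_dump_submit() annotation (the helper itself is hypothetical):

#include <linux/kernel.h>

/* Sketch: __printf(3, 4) marks argument 3 as a printf-style format string
 * whose arguments begin at position 4, so gcc/clang can warn about
 * mismatched specifiers wherever this helper is called.
 */
__printf(3, 4)
static void example_set_label(char *label, size_t len, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(label, len, fmt, ap);
	va_end(ap);
}
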
+ 20 - 15
msm/msm_fb.c

@@ -19,8 +19,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-buf.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
@@ -40,6 +41,7 @@ struct msm_framebuffer {
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 	.destroy = drm_gem_fb_destroy,
+	.dirty = drm_atomic_helper_dirtyfb,
 };
 
 #ifdef CONFIG_DEBUG_FS
@@ -283,9 +285,11 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev,
+								mode_cmd);
 	struct drm_gem_object *bos[4] = {0};
 	struct drm_framebuffer *fb;
-	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+	int ret, i, n = info->num_planes;
 
 	for (i = 0; i < n; i++) {
 		bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
@@ -310,24 +314,24 @@ out_unref:
 }
 
 struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+		const struct drm_mode_fb_cmd2 *mode_cmd,
+		struct drm_gem_object **bos)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev,
+								mode_cmd);
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_framebuffer *msm_fb = NULL;
 	struct drm_framebuffer *fb;
 	const struct msm_format *format;
 	int ret, i, num_planes;
-	unsigned int hsub, vsub;
 	bool is_modified = false;
 
 	DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
-	num_planes = drm_format_num_planes(mode_cmd->pixel_format);
-	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
-	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+	num_planes = info->num_planes;
 
 	format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
 			mode_cmd->modifier[0]);
@@ -370,7 +374,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 			goto fail;
 		} else {
 			ret = kms->funcs->check_modified_format(
-				kms, msm_fb->format, mode_cmd, bos);
+					kms, msm_fb->format, mode_cmd, bos);
 			if (ret)
 				goto fail;
 		}
@@ -384,16 +388,15 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		}
 
 		for (i = 0; i < num_planes; i++) {
-			unsigned int width = mode_cmd->width / (i ? hsub : 1);
-			unsigned int height = mode_cmd->height / (i ? vsub : 1);
+			unsigned int width = mode_cmd->width / (i ?
+					info->hsub : 1);
+			unsigned int height = mode_cmd->height / (i ?
+					info->vsub : 1);
 			unsigned int min_size;
-			unsigned int cpp = 0;
-
-			cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
 
 			min_size = (height - 1) * mode_cmd->pitches[i]
-				 + width * cpp
-				 + mode_cmd->offsets[i];
+				+ width * info->cpp[i]
+				+ mode_cmd->offsets[i];
 
 			if (!bos[i] || bos[i]->size < min_size) {
 				ret = -EINVAL;
@@ -450,6 +453,8 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
 		return ERR_CAST(bo);
 	}
 
+	msm_gem_object_set_name(bo, "stolenfb");
+
 	fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
 	if (IS_ERR(fb)) {
 		dev_err(dev->dev, "failed to allocate fb\n");

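The format helpers dropped here (drm_format_num_planes(), drm_format_plane_cpp() and the chroma-subsampling lookups) are replaced by fields of struct drm_format_info, both in msm_fb.c above and in the sde hunks further down. A sketch of the per-plane size check as rewritten above, extracted into a standalone helper for illustration:

#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>

/* Sketch: minimum backing-store size for one plane of a framebuffer,
 * computed from drm_format_info fields instead of the removed
 * drm_format_* helpers.
 */
static unsigned int example_plane_min_size(const struct drm_format_info *info,
		const struct drm_mode_fb_cmd2 *cmd, int plane)
{
	unsigned int width = cmd->width / (plane ? info->hsub : 1);
	unsigned int height = cmd->height / (plane ? info->vsub : 1);

	return (height - 1) * cmd->pitches[plane]
		+ width * info->cpp[plane]
		+ cmd->offsets[plane];
}
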
+ 152 - 32
msm/msm_gem.c

@@ -109,13 +109,12 @@ static struct page **get_pages(struct drm_gem_object *obj)
 			return ptr;
 		}
 
-		/*
-		 * Make sure to flush the CPU cache for newly allocated memory
-		 * so we don't get ourselves into trouble with a dirty cache
+		/* For non-cached buffers, ensure the new pages are clean
+		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
 			aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
-			dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
+			dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
 				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 		}
 	}
@@ -403,19 +402,14 @@ put_iova(struct drm_gem_object *obj)
 }
 
 /* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
 
@@ -468,6 +462,64 @@ unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+			obj->size >> PAGE_SHIFT, msm_obj->flags);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
+
+	mutex_unlock(&msm_obj->lock);
+	return ret;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+	ret = msm_gem_get_iova_locked(obj, aspace, iova);
+	mutex_unlock(&msm_obj->lock);
+
+	return ret;
+}
 
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
@@ -486,6 +538,27 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 	return vma ? vma->iova : 0;
 }
 
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+
+	mutex_lock(&msm_obj->lock);
+	vma = lookup_vma(obj, aspace);
+
+	if (!WARN_ON(!vma))
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
+				msm_obj->flags);
+
+	mutex_unlock(&msm_obj->lock);
+}
+
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
@@ -560,7 +633,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 	args->pitch = align_pitch(args->width, args->bpp);
 	args->size  = PAGE_ALIGN(args->pitch * args->height);
 	return msm_gem_new_handle(dev, file, args->size,
-			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -766,7 +839,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 	long ret;
 
-	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+	ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
 						  true,  remain);
 	if (ret == 0)
 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -789,7 +862,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 		struct seq_file *m)
 {
 	if (!dma_fence_is_signaled(fence))
-		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 				fence->ops->get_driver_name(fence),
 				fence->ops->get_timeline_name(fence),
 				fence->seqno);
@@ -798,8 +871,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object *robj = msm_obj->resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj = msm_obj->resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	struct msm_gem_vma *vma;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
@@ -825,11 +898,19 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			obj->name, kref_read(&obj->refcount),
 			off, msm_obj->vaddr);
 
-	/* FIXME: we need to print the address space here too */
-	list_for_each_entry(vma, &msm_obj->vmas, list)
-		seq_printf(m, " %08llx", vma->iova);
+	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+	if (!list_empty(&msm_obj->vmas)) {
+
+		seq_puts(m, "      vmas:");
 
-	seq_printf(m, " %zu%s\n", obj->size, madv);
+		list_for_each_entry(vma, &msm_obj->vmas, list)
+			seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+				vma->iova, vma->mapped ? "mapped" : "unmapped",
+				vma->inuse);
+
+		seq_puts(m, "\n");
+	}
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -856,9 +937,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 	int count = 0;
 	size_t size = 0;
 
+	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 	list_for_each_entry(msm_obj, list, mm_list) {
 		struct drm_gem_object *obj = &msm_obj->base;
-		seq_printf(m, "   ");
+		seq_puts(m, "   ");
 		msm_gem_describe(obj, m);
 		count++;
 		size += obj->size;
@@ -908,7 +990,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	}
 
 	if (msm_obj->resv == &msm_obj->_resv)
-		reservation_object_fini(msm_obj->resv);
+		dma_resv_fini(msm_obj->resv);
 
 	drm_gem_object_release(obj);
 
@@ -918,7 +1000,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-		uint32_t size, uint32_t flags, uint32_t *handle)
+		uint32_t size, uint32_t flags, uint32_t *handle,
+		char *name)
 {
 	struct drm_gem_object *obj;
 	int ret;
@@ -928,6 +1011,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
+	if (name)
+		msm_gem_object_set_name(obj, "%s", name);
+
 	ret = drm_gem_handle_create(file, obj, handle);
 
 	/* drop reference from allocate - handle holds it now */
@@ -938,7 +1024,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 
 static int msm_gem_new_impl(struct drm_device *dev,
 		uint32_t size, uint32_t flags,
-		struct reservation_object *resv,
+		struct dma_resv *resv,
 		struct drm_gem_object **obj,
 		bool struct_mutex_locked)
 {
@@ -969,7 +1055,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		msm_obj->resv = resv;
 	} else {
 		msm_obj->resv = &msm_obj->_resv;
-		reservation_object_init(msm_obj->resv);
+		dma_resv_init(msm_obj->resv);
 	}
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
@@ -1004,7 +1090,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
 	if (!iommu_present(&platform_bus_type))
 		use_vram = true;
-	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
 		use_vram = true;
 
 	if (WARN_ON(use_vram && !priv->vram.size))
@@ -1190,23 +1276,29 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 
 	if (iova) {
 		ret = msm_gem_get_iova(obj, aspace, iova);
-		if (ret) {
-			drm_gem_object_put(obj);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto err;
 	}
 
 	vaddr = msm_gem_get_vaddr(obj);
 	if (IS_ERR(vaddr)) {
 		msm_gem_put_iova(obj, aspace);
-		drm_gem_object_put(obj);
-		return ERR_CAST(vaddr);
+		ret = PTR_ERR(vaddr);
+		goto err;
 	}
 
 	if (bo)
 		*bo = obj;
 
 	return vaddr;
+err:
+	if (locked)
+		drm_gem_object_put(obj);
+	else
+		drm_gem_object_put_unlocked(obj);
+
+	return ERR_PTR(ret);
+
 }
 
 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1222,3 +1314,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 {
 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+		struct msm_gem_address_space *aspace, bool locked)
+{
+	if (IS_ERR_OR_NULL(bo))
+		return;
+
+	msm_gem_put_vaddr(bo);
+	msm_gem_unpin_iova(bo, aspace);
+
+	if (locked)
+		drm_gem_object_put(bo);
+	else
+		drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(bo);
+	va_list ap;
+
+	if (!fmt)
+		return;
+
+	va_start(ap, fmt);
+	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+	va_end(ap);
+}

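msm_gem.c above splits iova handling into a lookup step and an explicit pin step. A short sketch of how a caller pairs the new helpers (the wrapper function is hypothetical):

#include "msm_drv.h"
#include "msm_gem.h"

/* Sketch: get a pinned iova for a GEM object, program it, then drop the
 * pin. The mapping itself is only torn down later (shrinker, destroy,
 * etc.), as the msm_gem_unpin_iova() comment above describes.
 */
static int example_use_pinned_bo(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
	if (ret)
		return ret;

	/* ... hand iova to the hardware ... */

	msm_gem_unpin_iova(obj, aspace);
	return 0;
}
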
+ 7 - 3
msm/msm_gem.h

@@ -19,7 +19,7 @@
 #define __MSM_GEM_H__
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
@@ -82,6 +82,8 @@ struct msm_gem_vma {
 	uint64_t iova;
 	struct msm_gem_address_space *aspace;
 	struct list_head list;    /* node in msm_gem_object::vmas */
+	bool mapped;
+	int inuse;
 };
 
 struct msm_gem_object {
@@ -124,8 +126,8 @@ struct msm_gem_object {
 	struct list_head vmas;    /* list of msm_gem_vma */
 
 	/* normally (resv == &_resv) except for imported bo's */
-	struct reservation_object *resv;
-	struct reservation_object _resv;
+	struct dma_resv *resv;
+	struct dma_resv _resv;
 
 	/* For physically contiguous buffers.  Used when we don't have
 	 * an IOMMU.  Also used for stolen/splashscreen buffer.
@@ -136,6 +138,7 @@ struct msm_gem_object {
 
 	struct msm_gem_address_space *aspace;
 	bool in_active_list;
+	char name[32]; /* Identifier to print for the debugfs files */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -195,6 +198,7 @@ struct msm_gem_submit {
 	struct msm_ringbuffer *ring;
 	unsigned int nr_cmds;
 	unsigned int nr_bos;
+	u32 ident;	   /* A "identifier" for the submit for logging */
 	struct {
 		uint32_t type;
 		uint32_t size;  /* in dwords */

+ 1 - 1
msm/msm_gem_prime.c

@@ -76,7 +76,7 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 		msm_gem_put_pages(obj);
 }
 
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+struct dma_resv *msm_gem_prime_res_obj(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 

+ 4 - 5
msm/msm_gem_vma.c

@@ -197,9 +197,9 @@ msm_gem_address_space_destroy(struct kref *kref)
 	struct msm_gem_address_space *aspace = container_of(kref,
 			struct msm_gem_address_space, kref);
 
-	if (aspace && aspace->ops->destroy)
-		aspace->ops->destroy(aspace);
-
+	drm_mm_takedown(&aspace->mm);
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
 	kfree(aspace);
 }
 
@@ -232,8 +232,7 @@ static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
 	msm_gem_address_space_put(aspace);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt,
 		unsigned int flags)
 {

+ 0 - 1
msm/msm_smmu.c

@@ -21,7 +21,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/msm_dma_iommu_mapping.h>
 
-#include <asm/dma-iommu.h>
 #include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"

+ 8 - 3
msm/sde/sde_connector.c

@@ -16,6 +16,7 @@
 #include "dsi_display.h"
 #include "sde_crtc.h"
 #include "sde_rm.h"
+#include <drm/drm_probe_helper.h>
 
 #define BL_NODE_NAME_SIZE 32
 #define HDR10_PLUS_VSIF_TYPE_CODE      0x81
@@ -2105,23 +2106,26 @@ sde_connector_atomic_best_encoder(struct drm_connector *connector,
 }
 
 static int sde_connector_atomic_check(struct drm_connector *connector,
-		struct drm_connector_state *new_conn_state)
+		struct drm_atomic_state *state)
 {
 	struct sde_connector *c_conn;
 	struct sde_connector_state *c_state;
 	bool qsync_dirty = false, has_modeset = false;
+	struct drm_connector_state *new_conn_state;
 
 	if (!connector) {
 		SDE_ERROR("invalid connector\n");
 		return -EINVAL;
 	}
 
+	c_conn = to_sde_connector(connector);
+	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
+
 	if (!new_conn_state) {
 		SDE_ERROR("invalid connector state\n");
 		return -EINVAL;
 	}
 
-	c_conn = to_sde_connector(connector);
 	c_state = to_sde_connector_state(new_conn_state);
 
 	has_modeset = sde_crtc_atomic_check_has_modeset(new_conn_state->state,
@@ -2135,10 +2139,11 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
 		SDE_ERROR("invalid qsync update during modeset\n");
 		return -EINVAL;
 	}
+	new_conn_state = drm_atomic_get_new_connector_state(state, connector);
 
 	if (c_conn->ops.atomic_check)
 		return c_conn->ops.atomic_check(connector,
-				c_conn->display, new_conn_state);
+				c_conn->display, state);
 
 	return 0;
 }

+ 1 - 1
msm/sde/sde_connector.h

@@ -295,7 +295,7 @@ struct sde_connector_ops {
 	 */
 	int (*atomic_check)(struct drm_connector *connector,
 			void *display,
-			struct drm_connector_state *c_state);
+			struct drm_atomic_state *state);
 
 	/**
 	 * pre_destroy - handle pre destroy operations for the connector

+ 4 - 4
msm/sde/sde_crtc.c

@@ -23,7 +23,7 @@
 #include <uapi/drm/sde_drm.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_flip_work.h>
 #include <linux/clk/qcom.h>
 
@@ -2794,8 +2794,8 @@ static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
 	struct sde_crtc_state *cstate;
 	struct drm_display_mode *mode;
 	struct sde_kms *kms;
-	struct sde_hw_ds *hw_ds;
-	struct sde_hw_ds_cfg *cfg;
+	struct sde_hw_ds *hw_ds = NULL;
+	struct sde_hw_ds_cfg *cfg = NULL;
 	u32 ret = 0;
 	u32 num_ds_enable = 0, hdisplay = 0;
 	u32 max_in_width = 0, max_out_width = 0;
@@ -4538,7 +4538,7 @@ static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct sde_kms *kms;
-	struct drm_plane *plane;
+	struct drm_plane *plane = NULL;
 	struct drm_display_mode *mode;
 	int rc = 0, cnt = 0;
 

+ 1 - 46
msm/sde/sde_encoder.c

@@ -25,7 +25,7 @@
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_intf.h"
@@ -226,7 +226,6 @@ enum sde_enc_rc_states {
  * @recovery_events_enabled:	status of hw recovery feature enable by client
  * @elevated_ahb_vote:		increase AHB bus speed for the first frame
  *				after power collapse
- * @pm_qos_cpu_req:		pm_qos request for cpu frequency
  * @mode_info:                  stores the current mode and should be used
  *				 only in commit phase
  */
@@ -292,7 +291,6 @@ struct sde_encoder_virt {
 
 	bool recovery_events_enabled;
 	bool elevated_ahb_vote;
-	struct pm_qos_request pm_qos_cpu_req;
 	struct msm_mode_info mode_info;
 };
 
@@ -314,44 +312,6 @@ void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
 	}
 }
 
-static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	int cpu;
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	cpu_mask = sde_kms->catalog->perf.cpu_mask;
-	cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
-
-	req = &sde_enc->pm_qos_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
-
-	SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
-}
-
-static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc,
-	struct sde_kms *sde_kms)
-{
-	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
-
-	if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
-		return;
-
-	pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
-}
-
 static bool _sde_encoder_is_autorefresh_enabled(
 		struct sde_encoder_virt *sde_enc)
 {
@@ -2165,12 +2125,7 @@ static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
 		/* enable all the irq */
 		_sde_encoder_irq_control(drm_enc, true);
 
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_add_request(drm_enc, sde_kms);
-
 	} else {
-		if (is_cmd_mode)
-			_sde_encoder_pm_qos_remove_request(drm_enc, sde_kms);
 
 		/* disable all the irq */
 		_sde_encoder_irq_control(drm_enc, false);

+ 11 - 10
msm/sde/sde_encoder_phys_wb.c

@@ -703,8 +703,8 @@ static int sde_encoder_phys_wb_atomic_check(
 	const struct drm_display_mode *mode = &crtc_state->mode;
 	int rc;
 
-	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id, mode->name,
+	SDE_DEBUG("[atomic_check:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->name,
 			mode->hdisplay, mode->vdisplay);
 
 	if (!conn_state || !conn_state->connector) {
@@ -962,8 +962,8 @@ static void sde_encoder_phys_wb_setup(
 	struct drm_framebuffer *fb;
 	struct sde_rect *wb_roi = &wb_enc->wb_roi;
 
-	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode.base.id, mode.name,
+	SDE_DEBUG("[mode_set:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode.name,
 			mode.hdisplay, mode.vdisplay);
 
 	memset(wb_roi, 0, sizeof(struct sde_rect));
@@ -1150,9 +1150,9 @@ static void sde_encoder_phys_wb_mode_set(
 	phys_enc->cached_mode = *adj_mode;
 	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
 
-	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
-			hw_wb->idx - WB_0, mode->base.id,
-			mode->name, mode->hdisplay, mode->vdisplay);
+	SDE_DEBUG("[mode_set_cache:%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->name,
+			mode->hdisplay, mode->vdisplay);
 
 	phys_enc->hw_ctl = NULL;
 	phys_enc->hw_cdm = NULL;
@@ -1401,6 +1401,7 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 	uint32_t size;
 	int nplanes, i, ret;
 	struct msm_gem_address_space *aspace;
+	const struct drm_format_info *info;
 
 	if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
 		SDE_ERROR("invalid params\n");
@@ -1434,7 +1435,8 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 	}
 
 	/* allocate gem tracking object */
-	nplanes = drm_format_num_planes(pixel_format);
+	info = drm_get_format_info(dev, &mode_cmd);
+	nplanes = info->num_planes;
 	if (nplanes >= SDE_MAX_PLANES) {
 		SDE_ERROR("requested format has too many planes\n");
 		return -EINVAL;
@@ -1452,8 +1454,7 @@ static int _sde_encoder_phys_wb_init_internal_fb(
 
 	for (i = 0; i < nplanes; ++i) {
 		wb_enc->bo_disable[i] = wb_enc->bo_disable[0];
-		mode_cmd.pitches[i] = width *
-			drm_format_plane_cpp(pixel_format, i);
+		mode_cmd.pitches[i] = width * info->cpp[i];
 	}
 
 	fb = msm_framebuffer_init(dev, &mode_cmd, wb_enc->bo_disable);

+ 6 - 3
msm/sde/sde_formats.c

@@ -1168,10 +1168,11 @@ int sde_format_check_modified_format(
 		const struct drm_mode_fb_cmd2 *cmd,
 		struct drm_gem_object **bos)
 {
-	int ret, i, num_base_fmt_planes;
+	const struct drm_format_info *info;
 	const struct sde_format *fmt;
 	struct sde_hw_fmt_layout layout;
 	uint32_t bos_total_size = 0;
+	int ret, i;
 
 	if (!msm_fmt || !cmd || !bos) {
 		DRM_ERROR("invalid arguments\n");
@@ -1179,14 +1180,16 @@ int sde_format_check_modified_format(
 	}
 
 	fmt = to_sde_format(msm_fmt);
-	num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+	info = drm_format_info(fmt->base.pixel_format);
+	if (!info)
+		return -EINVAL;
 
 	ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
 			&layout, cmd->pitches);
 	if (ret)
 		return ret;
 
-	for (i = 0; i < num_base_fmt_planes; i++) {
+	for (i = 0; i < info->num_planes; i++) {
 		if (!bos[i]) {
 			DRM_ERROR("invalid handle for plane %d\n", i);
 			return -EINVAL;

+ 1 - 1
msm/sde/sde_hw_catalog.c

@@ -2913,7 +2913,7 @@ static int sde_vbif_parse_dt(struct device_node *np,
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[VBIF_PROP_MAX];
 	u32 off_count, vbif_len;
-	struct sde_vbif_cfg *vbif;
+	struct sde_vbif_cfg *vbif = NULL;
 
 	if (!sde_cfg) {
 		SDE_ERROR("invalid argument\n");

+ 6 - 5
msm/sde/sde_kms.c

@@ -25,7 +25,8 @@
 #include <linux/of_irq.h>
 #include <linux/dma-buf.h>
 #include <linux/memblock.h>
-#include <linux/bootmem.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -506,7 +507,7 @@ static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
 	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
 		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
 		if (ret) {
-			smmu_state->sui_misr_state == NONE;
+			smmu_state->sui_misr_state = NONE;
 			goto end;
 		}
 	}
@@ -2467,9 +2468,9 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 		/* currently consider modes[0] as the preferred mode */
 		drm_mode = list_first_entry(&connector->modes,
 				struct drm_display_mode, head);
-		SDE_DEBUG("drm_mode->name = %s, id=%d, type=0x%x, flags=0x%x\n",
-				drm_mode->name, drm_mode->base.id,
-				drm_mode->type, drm_mode->flags);
+		SDE_DEBUG("drm_mode->name = %s, type=0x%x, flags=0x%x\n",
+				drm_mode->name, drm_mode->type,
+				drm_mode->flags);
 
 		/* Update CRTC drm structure */
 		crtc->state->active = true;

+ 3 - 6
msm/sde/sde_plane.c

@@ -1406,6 +1406,7 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde,
 {
 	struct sde_hw_pixel_ext *pe;
 	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+	const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
 
 	if (!psde || !fmt || !pstate) {
 		SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
@@ -1421,10 +1422,8 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde,
 		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
 
 	/* don't chroma subsample if decimating */
-	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
-		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
-	chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 :
-		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 : info->hsub;
+	chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 : info->vsub;
 
 	/* update scaler */
 	if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3) ||
@@ -4007,8 +4006,6 @@ static void sde_plane_destroy(struct drm_plane *plane)
 		msm_property_destroy(&psde->property_info);
 		mutex_destroy(&psde->lock);
 
-		drm_plane_helper_disable(plane, NULL);
-
 		/* this will destroy the states as well */
 		drm_plane_cleanup(plane);
 

+ 1 - 1
msm/sde/sde_rm.c

@@ -1951,7 +1951,7 @@ static void _sde_rm_release_rsvp(
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc, bool nxt)
 {
 	struct sde_rm_rsvp *rsvp;
-	struct drm_connector *conn;
+	struct drm_connector *conn = NULL;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	uint64_t top_ctrl;

+ 1 - 0
msm/sde/sde_wb.c

@@ -6,6 +6,7 @@
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
 
 #include <uapi/drm/sde_drm.h>
+#include <drm/drm_probe_helper.h>
 
 #include "msm_kms.h"
 #include "sde_kms.h"

+ 0 - 1
pll/pll_util.c

@@ -346,7 +346,6 @@ pnode_err:
 	if (pnode)
 		of_node_put(pnode);
 
-	dma_release_declared_memory(&pdev->dev);
 	return rc;
 }
 

+ 0 - 2
rotator/sde_rotator_core.c

@@ -3228,8 +3228,6 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 		goto error_hw_init;
 	}
 
-	sde_rotator_pm_qos_add(mdata);
-
 	ret = sde_rotator_init_queue(mgr);
 	if (ret) {
 		SDEROT_ERR("fail to init queue\n");

+ 0 - 105
rotator/sde_rotator_dev.c

@@ -55,8 +55,6 @@
 
 static void sde_rotator_submit_handler(struct kthread_work *work);
 static void sde_rotator_retire_handler(struct kthread_work *work);
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request);
 #ifdef CONFIG_COMPAT
 static long sde_rotator_compat_ioctl32(struct file *file,
 	unsigned int cmd, unsigned long arg);
@@ -1001,8 +999,6 @@ struct sde_rotator_ctx *sde_rotator_ctx_open(
 		SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
 
 	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-				 SDE_ROTATOR_ADD_REQUEST);
 	ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
 			ctx->session_id, &ctx->work_queue);
 	if (ret < 0) {
@@ -1127,8 +1123,6 @@ static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
 	}
 	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
 	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_pm_qos_request(rot_dev,
-			SDE_ROTATOR_REMOVE_REQUEST);
 	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
@@ -1243,104 +1237,6 @@ static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
 	return retire_delta >= 0;
 }
 
-static void sde_rotator_pm_qos_remove(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	pm_qos_remove_request(req);
-}
-
-void sde_rotator_pm_qos_add(struct sde_rot_data_type *rot_mdata)
-{
-	struct pm_qos_request *req;
-	u32 cpu_mask;
-	int cpu;
-
-	if (!rot_mdata) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_mdata->rot_pm_qos_cpu_mask;
-
-	if (!cpu_mask)
-		return;
-
-	req = &rot_mdata->pm_qos_rot_cpu_req;
-	req->type = PM_QOS_REQ_AFFINE_CORES;
-	cpumask_empty(&req->cpus_affine);
-	for_each_possible_cpu(cpu) {
-		if ((1 << cpu) & cpu_mask)
-			cpumask_set_cpu(cpu, &req->cpus_affine);
-	}
-	pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY,
-		PM_QOS_DEFAULT_VALUE);
-
-	SDEROT_DBG("rotator pmqos add mask %x latency %x\n",
-		rot_mdata->rot_pm_qos_cpu_mask,
-		rot_mdata->rot_pm_qos_cpu_dma_latency);
-}
-
-static void sde_rotator_pm_qos_request(struct sde_rotator_device *rot_dev,
-					 bool add_request)
-{
-	u32 cpu_mask;
-	u32 cpu_dma_latency;
-	bool changed = false;
-
-	if (!rot_dev) {
-		SDEROT_DBG("invalid rot device or context\n");
-		return;
-	}
-
-	cpu_mask = rot_dev->mdata->rot_pm_qos_cpu_mask;
-	cpu_dma_latency = rot_dev->mdata->rot_pm_qos_cpu_dma_latency;
-
-	if (!cpu_mask)
-		return;
-
-	if (add_request) {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-			changed = true;
-		rot_dev->mdata->rot_pm_qos_cpu_count++;
-	} else {
-		if (rot_dev->mdata->rot_pm_qos_cpu_count != 0) {
-			rot_dev->mdata->rot_pm_qos_cpu_count--;
-			if (rot_dev->mdata->rot_pm_qos_cpu_count == 0)
-				changed = true;
-		} else {
-			SDEROT_DBG("%s: ref_count is not balanced\n",
-				__func__);
-		}
-	}
-
-	if (!changed)
-		return;
-
-	SDEROT_EVTLOG(add_request, cpu_mask, cpu_dma_latency);
-
-	if (!add_request) {
-		pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-			PM_QOS_DEFAULT_VALUE);
-		return;
-	}
-
-	pm_qos_update_request(&rot_dev->mdata->pm_qos_rot_cpu_req,
-		cpu_dma_latency);
-}
-
 /*
  * sde_rotator_inline_open - open inline rotator session
  * @pdev: Pointer to rotator platform device
@@ -3689,7 +3585,6 @@ static int sde_rotator_remove(struct platform_device *pdev)
 		return 0;
 	}
 
-	sde_rotator_pm_qos_remove(rot_dev->mdata);
 	for (i = MAX_ROT_OPEN_SESSION - 1; i >= 0; i--)
 		kthread_stop(rot_dev->rot_thread[i]);
 	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);