
Add 'qcom/opensource/securemsm-kernel/' from commit 'a6005ceed271246683596608e4c56b4d921fb363'

git-subtree-dir: qcom/opensource/securemsm-kernel
git-subtree-mainline: 46e9caf0d0eafdd15a8ad8516b6b626c7972d08c
git-subtree-split: a6005ceed271246683596608e4c56b4d921fb363
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/qcom/opensource/securemsm-kernel
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
David Wronek, 8 months ago
Parent
Current commit
587685c687
92 files changed, 45193 insertions(+), 0 deletions(-)
  1. qcom/opensource/securemsm-kernel/Android.bp (+99, -0)
  2. qcom/opensource/securemsm-kernel/Android.mk (+181, -0)
  3. qcom/opensource/securemsm-kernel/BUILD.bazel (+92, -0)
  4. qcom/opensource/securemsm-kernel/Kbuild (+58, -0)
  5. qcom/opensource/securemsm-kernel/Makefile (+12, -0)
  6. qcom/opensource/securemsm-kernel/build/anorak.bzl (+21, -0)
  7. qcom/opensource/securemsm-kernel/build/blair.bzl (+21, -0)
  8. qcom/opensource/securemsm-kernel/build/monaco.bzl (+20, -0)
  9. qcom/opensource/securemsm-kernel/build/niobe.bzl (+21, -0)
  10. qcom/opensource/securemsm-kernel/build/pineapple.bzl (+21, -0)
  11. qcom/opensource/securemsm-kernel/build/sun.bzl (+21, -0)
  12. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig.conf (+7, -0)
  13. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig.h (+10, -0)
  14. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom.conf (+1, -0)
  15. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom.h (+6, -0)
  16. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom_compat.h (+6, -0)
  17. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smcinvoke.conf (+1, -0)
  18. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smcinvoke.h (+6, -0)
  19. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smmu_proxy.conf (+1, -0)
  20. qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smmu_proxy.h (+6, -0)
  21. qcom/opensource/securemsm-kernel/crypto-qti/fips_status.h (+39, -0)
  22. qcom/opensource/securemsm-kernel/crypto-qti/ota_crypto.c (+999, -0)
  23. qcom/opensource/securemsm-kernel/crypto-qti/qce.h (+224, -0)
  24. qcom/opensource/securemsm-kernel/crypto-qti/qce50.c (+6823, -0)
  25. qcom/opensource/securemsm-kernel/crypto-qti/qce50.h (+256, -0)
  26. qcom/opensource/securemsm-kernel/crypto-qti/qce_ota.h (+22, -0)
  27. qcom/opensource/securemsm-kernel/crypto-qti/qcedev.c (+2887, -0)
  28. qcom/opensource/securemsm-kernel/crypto-qti/qcedev_smmu.c (+443, -0)
  29. qcom/opensource/securemsm-kernel/crypto-qti/qcedev_smmu.h (+81, -0)
  30. qcom/opensource/securemsm-kernel/crypto-qti/qcedevi.h (+136, -0)
  31. qcom/opensource/securemsm-kernel/crypto-qti/qcom_crypto_device.h (+19, -0)
  32. qcom/opensource/securemsm-kernel/crypto-qti/qcrypto.c (+5546, -0)
  33. qcom/opensource/securemsm-kernel/crypto-qti/qcrypto.h (+61, -0)
  34. qcom/opensource/securemsm-kernel/crypto-qti/qcryptohw_50.h (+529, -0)
  35. qcom/opensource/securemsm-kernel/hdcp/hdcp1.h (+123, -0)
  36. qcom/opensource/securemsm-kernel/hdcp/hdcp1_ops.h (+27, -0)
  37. qcom/opensource/securemsm-kernel/hdcp/hdcp2p2.h (+304, -0)
  38. qcom/opensource/securemsm-kernel/hdcp/hdcp_main.c (+338, -0)
  39. qcom/opensource/securemsm-kernel/hdcp/hdcp_main.h (+113, -0)
  40. qcom/opensource/securemsm-kernel/hdcp/hdcp_qseecom.c (+1456, -0)
  41. qcom/opensource/securemsm-kernel/hdcp/hdcp_qseecom.h (+346, -0)
  42. qcom/opensource/securemsm-kernel/hdcp/hdcp_smcinvoke.c (+1103, -0)
  43. qcom/opensource/securemsm-kernel/hdcp/hdcp_smcinvoke.h (+62, -0)
  44. qcom/opensource/securemsm-kernel/include/linux/CTrustedCameraDriver.h (+10, -0)
  45. qcom/opensource/securemsm-kernel/include/linux/IClientEnv.h (+159, -0)
  46. qcom/opensource/securemsm-kernel/include/linux/ITrustedCameraDriver.h (+130, -0)
  47. qcom/opensource/securemsm-kernel/include/linux/smci_clientenv.h (+108, -0)
  48. qcom/opensource/securemsm-kernel/include/linux/smci_object.h (+151, -0)
  49. qcom/opensource/securemsm-kernel/include/linux/smcinvoke.h (+110, -0)
  50. qcom/opensource/securemsm-kernel/include/linux/smcinvoke_object.h (+202, -0)
  51. qcom/opensource/securemsm-kernel/include/smci/interface/IAppClient.h (+48, -0)
  52. qcom/opensource/securemsm-kernel/include/smci/interface/IAppController.h (+143, -0)
  53. qcom/opensource/securemsm-kernel/include/smci/interface/IAppLoader.h (+105, -0)
  54. qcom/opensource/securemsm-kernel/include/smci/interface/IOpener.h (+48, -0)
  55. qcom/opensource/securemsm-kernel/include/smci/interface/smci_appclient.h (+41, -0)
  56. qcom/opensource/securemsm-kernel/include/smci/interface/smci_appcontroller.h (+100, -0)
  57. qcom/opensource/securemsm-kernel/include/smci/interface/smci_apploader.h (+79, -0)
  58. qcom/opensource/securemsm-kernel/include/smci/interface/smci_opener.h (+40, -0)
  59. qcom/opensource/securemsm-kernel/include/smci/uid/CAppClient.h (+20, -0)
  60. qcom/opensource/securemsm-kernel/include/smci/uid/CAppLoader.h (+12, -0)
  61. qcom/opensource/securemsm-kernel/include/uapi/linux/qcedev.h (+390, -0)
  62. qcom/opensource/securemsm-kernel/include/uapi/linux/qcota.h (+218, -0)
  63. qcom/opensource/securemsm-kernel/include/uapi/linux/qrng.h (+17, -0)
  64. qcom/opensource/securemsm-kernel/include/uapi/linux/qseecom.h (+186, -0)
  65. qcom/opensource/securemsm-kernel/include/uapi/linux/qseecom_api.h (+196, -0)
  66. qcom/opensource/securemsm-kernel/include/uapi/linux/smcinvoke.h (+110, -0)
  67. qcom/opensource/securemsm-kernel/linux/misc/qseecom_kernel.h (+48, -0)
  68. qcom/opensource/securemsm-kernel/linux/misc/qseecom_priv.h (+26, -0)
  69. qcom/opensource/securemsm-kernel/linux/misc/qseecomi.h (+740, -0)
  70. qcom/opensource/securemsm-kernel/qrng/msm_rng.c (+493, -0)
  71. qcom/opensource/securemsm-kernel/qseecom/ice.h (+145, -0)
  72. qcom/opensource/securemsm-kernel/qseecom/qseecom.c (+9885, -0)
  73. qcom/opensource/securemsm-kernel/securemsm_kernel.bzl (+112, -0)
  74. qcom/opensource/securemsm-kernel/securemsm_kernel_product_board.mk (+71, -0)
  75. qcom/opensource/securemsm-kernel/securemsm_kernel_vendor_board.mk (+75, -0)
  76. qcom/opensource/securemsm-kernel/securemsm_modules.bzl (+160, -0)
  77. qcom/opensource/securemsm-kernel/smcinvoke/IQSEEComCompat.h (+71, -0)
  78. qcom/opensource/securemsm-kernel/smcinvoke/IQSEEComCompatAppLoader.h (+106, -0)
  79. qcom/opensource/securemsm-kernel/smcinvoke/smci_qseecomcompat.h (+64, -0)
  80. qcom/opensource/securemsm-kernel/smcinvoke/smci_qseecomcompatapploader.h (+83, -0)
  81. qcom/opensource/securemsm-kernel/smcinvoke/smcinvoke.c (+3296, -0)
  82. qcom/opensource/securemsm-kernel/smcinvoke/smcinvoke_kernel.c (+639, -0)
  83. qcom/opensource/securemsm-kernel/smcinvoke/trace_smcinvoke.h (+502, -0)
  84. qcom/opensource/securemsm-kernel/smmu-proxy/include/uapi/linux/qti-smmu-proxy.h (+58, -0)
  85. qcom/opensource/securemsm-kernel/smmu-proxy/linux/qti-smmu-proxy.h (+18, -0)
  86. qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-common.c (+113, -0)
  87. qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-common.h (+30, -0)
  88. qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-msgq.h (+107, -0)
  89. qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-pvm.c (+323, -0)
  90. qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-tvm.c (+775, -0)
  91. qcom/opensource/securemsm-kernel/ssg_kernel_headers.py (+113, -0)
  92. qcom/opensource/securemsm-kernel/tz_log/tz_log.c (+1969, -0)

+ 99 - 0
qcom/opensource/securemsm-kernel/Android.bp

@@ -0,0 +1,99 @@
+headers_src = [
+    "include/uapi/linux/smc*ke.h",
+    "include/linux/smc*_object.h",
+    "include/linux/IClientE*v.h",
+    "include/linux/smc*_clientenv.h",
+]
+
+smcinvoke_headers_out = [
+    "include/linux/smcinvoke.h",
+    "include/linux/smcinvoke_object.h",
+    "include/linux/smci_object.h",
+    "include/linux/IClientEnv.h",
+    "include/linux/smci_clientenv.h",
+]
+
+smcinvoke_kernel_headers_verbose = "--verbose "
+
+genrule {
+    name: "qti_generate_smcinvoke_kernel_headers",
+    tools: ["headers_install.sh",
+            "unifdef"
+    ],
+    tool_files: [
+         "ssg_kernel_headers.py",
+    ],
+    srcs: headers_src,
+    cmd: "python3 -u $(location ssg_kernel_headers.py) " +
+        smcinvoke_kernel_headers_verbose +
+        "--header_arch arm64 " +
+        "--gen_dir $(genDir) " +
+        "--smcinvoke_headers_to_expose $(locations include/uapi/linux/smc*ke.h) $(locations include/linux/smc*_object.h) $(locations include/linux/IClientE*v.h) $(locations include/linux/smc*_clientenv.h) " +
+        "--unifdef $(location unifdef) " +
+        "--headers_install $(location headers_install.sh)",
+    out: smcinvoke_headers_out,
+}
+
+cc_library_headers {
+    name: "smcinvoke_kernel_headers",
+    export_include_dirs: ["."] + ["include"] + ["include/uapi"],
+    generated_headers: ["qti_generate_smcinvoke_kernel_headers"],
+    export_generated_headers: ["qti_generate_smcinvoke_kernel_headers"],
+    vendor: true,
+    recovery_available: true
+}
+
+qseecom_headers_src = [
+    "include/uapi/linux/qsee*om.h",
+    "include/uapi/linux/qsee*api.h",
+]
+
+qseecom_headers_out = [
+    "include/linux/qseecom.h",
+    "include/linux/qseecom_api.h",
+]
+
+qseecom_kernel_headers_verbose = "--verbose "
+
+genrule {
+    name: "qti_generate_qseecom_kernel_headers",
+    tools: ["headers_install.sh",
+            "unifdef"
+    ],
+    tool_files: [
+         "ssg_kernel_headers.py",
+    ],
+    srcs: qseecom_headers_src,
+    cmd: "python3 -u $(location ssg_kernel_headers.py) " +
+        qseecom_kernel_headers_verbose +
+        "--header_arch arm64 " +
+        "--gen_dir $(genDir) " +
+        "--smcinvoke_headers_to_expose $(locations include/uapi/linux/qsee*om.h) $(locations include/uapi/linux/qsee*api.h) " +
+        "--unifdef $(location unifdef) " +
+        "--headers_install $(location headers_install.sh)",
+    out: qseecom_headers_out,
+}
+
+
+cc_library_headers {
+    name: "qseecom_kernel_headers",
+    export_include_dirs: ["."] + ["include"] + ["include/uapi"],
+    generated_headers: ["qti_generate_qseecom_kernel_headers"],
+    export_generated_headers: ["qti_generate_qseecom_kernel_headers"],
+    vendor: true,
+    recovery_available: true
+}
+
+
+cc_library_headers {
+    name: "smmu_proxy_uapi_header",
+    vendor_available: true,
+    export_include_dirs: ["smmu-proxy/include/uapi/"],
+}
+
+cc_library_headers {
+    name: "securemsm_kernel_uapi_headers",
+    vendor_available: true,
+    host_supported: true,
+    export_include_dirs: ["include/uapi"],
+}

+ 181 - 0
qcom/opensource/securemsm-kernel/Android.mk

@@ -0,0 +1,181 @@
+# Android makefile for securemsm kernel modules
+
+ENABLE_SECUREMSM_DLKM := true
+ENABLE_SECUREMSM_QTEE_DLKM := true
+
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+  ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE),false)
+    ENABLE_SECUREMSM_DLKM := false
+  endif
+  ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE),false)
+    ENABLE_SECUREMSM_QTEE_DLKM := false
+  endif
+endif
+
+ifeq ($(ENABLE_SECUREMSM_DLKM), true)
+  ENABLE_QCRYPTO_DLKM := true
+  ENABLE_HDCP_QSEECOM_DLKM := true
+  ENABLE_QRNG_DLKM := true
+  ifeq ($(TARGET_USES_SMMU_PROXY), true)
+    ENABLE_SMMU_PROXY := true
+  endif #TARGET_USES_SMMU_PROXY
+endif #ENABLE_SECUREMSM_DLKM
+
+ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := true
+  #Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
+  ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
+    ENABLE_QSEECOM_DLKM := true
+  endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
+endif #ENABLE_SECUREMSM_QTEE_DLKM
+
+ifeq ($(TARGET_USES_GY), true)
+  ENABLE_QCRYPTO_DLKM := false
+  ENABLE_HDCP_QSEECOM_DLKM := false
+  ENABLE_QRNG_DLKM := false
+  ENABLE_SMMU_PROXY := false
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := false
+  ENABLE_QSEECOM_DLKM := false
+endif #TARGET_USES_GY
+
+LOCAL_PATH := $(call my-dir)
+
+VENDOR_OPENSOURCE_DIR ?= vendor/qcom/opensource
+VENDOR_COMMON_DIR ?= device/qcom/common
+
+DLKM_DIR := $(TOP)/$(VENDOR_COMMON_DIR)/dlkm
+
+SEC_KERNEL_DIR := $(TOP)/$(VENDOR_OPENSOURCE_DIR)/securemsm-kernel
+
+LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/include/ \
+                                $(LOCAL_PATH)/include/uapi
+
+SSG_SRC_FILES := \
+	$(wildcard $(LOCAL_PATH)/*) \
+ 	$(wildcard $(LOCAL_PATH)/*/*) \
+ 	$(wildcard $(LOCAL_PATH)/*/*/*) \
+ 	$(wildcard $(LOCAL_PATH)/*/*/*/*)
+LOCAL_MODULE_DDK_BUILD := true
+# This is set once per LOCAL_PATH, not per (kernel) module
+KBUILD_OPTIONS := SSG_ROOT=$(SEC_KERNEL_DIR)
+KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
+
+CONDITIONAL_FLAGS := $(ENABLE_SECUREMSM_QTEE_DLKM) $(ENABLE_SECUREMSM_DLKM)
+
+ifneq (0, $(words $(filter true, $(CONDITIONAL_FLAGS))))
+include $(CLEAR_VARS)
+# For incremental compilation
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := sec-module-symvers
+LOCAL_MODULE_STEM         := Module.symvers
+LOCAL_MODULE_KBUILD_NAME  := Module.symvers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif
+
+ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
+include $(CLEAR_VARS)
+#LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := smcinvoke_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := smcinvoke_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_HEADER_LIBRARIES    := smcinvoke_kernel_headers
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_SMCINVOKE_DLKM
+###################################################
+###################################################
+ifeq ($(ENABLE_TZLOG_DLKM), true)
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := tz_log_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := tz_log_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_TZLOG_DLKM
+
+ifeq ($(ENABLE_QSEECOM_DLKM), true)
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := qseecom_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := qseecom_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_QSEECOM_DLKM
+###################################################
+###################################################
+
+ifeq ($(ENABLE_QCRYPTO_DLKM), true)
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := qce50_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := qce50_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###################################################
+###################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := qcedev-mod_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := qcedev-mod_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+###################################################
+###################################################
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := qcrypto-msm_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := qcrypto-msm_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_QCRYPTO_DLKM
+###################################################
+###################################################
+ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := hdcp_qseecom_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := hdcp_qseecom_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_HDCP_QSEECOM_DLKM
+###################################################
+###################################################
+ifeq ($(ENABLE_QRNG_DLKM), true)
+include $(CLEAR_VARS)
+LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_MODULE              := qrng_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := qrng_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_QRNG_DLKM
+###################################################
+###################################################
+ifeq ($(ENABLE_SMMU_PROXY), true)
+include $(CLEAR_VARS)
+#LOCAL_SRC_FILES           := $(SSG_SRC_FILES)
+LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/smmu-proxy/ $(LOCAL_PATH)/
+LOCAL_MODULE              := smmu_proxy_dlkm.ko
+LOCAL_MODULE_KBUILD_NAME  := smmu_proxy_dlkm.ko
+LOCAL_MODULE_TAGS         := optional
+LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_MODULE_PATH         := $(KERNEL_MODULES_OUT)
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+endif #ENABLE_SMMU_PROXY

+ 92 - 0
qcom/opensource/securemsm-kernel/BUILD.bazel

@@ -0,0 +1,92 @@
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+ddk_headers(
+    name = "smcinvoke_kernel_headers",
+    hdrs = glob([
+        "include/linux/smcinvoke*.h",
+        "include/linux/smci_o*.h",
+        "include/uapi/linux/smcinvoke*.h",
+        "include/linux/IClientE*.h",
+        "include/linux/smci_c*.h",
+        "include/smci/interface/IOpener.h",
+        "include/smci/interface/smci_opener.h",
+        "include/linux/ITrustedCameraDriver.h",
+        "include/linux/CTrustedCameraDriver.h",
+    ]),
+    includes = [
+        "include",
+        "include/linux",
+        "linux",
+        "include/uapi/linux",
+    ],
+)
+
+ddk_headers(
+    name = "qseecom_kernel_headers",
+    hdrs = glob([
+        "include/uapi/linux/qseecom.h",
+        "include/uapi/linux/qseecom_api.h",
+        "linux/misc/qseecom_kernel.h",
+        "linux/misc/qseecom_priv.h",
+        "linux/misc/qseecomi.h",
+    ]),
+    includes = ["linux", "include/uapi", "include/uapi/linux"]
+)
+
+ddk_headers(
+    name = "hdcp_qseecom_dlkm",
+    hdrs = glob([
+        ":smcinvoke_kernel_headers",
+        "linux/*.h",
+        "include/linux/*h",
+        "include/smci/uid/*h",
+        "include/smci/interface/*h",
+        "linux/misc/*.h",
+        "config/*.h",
+    ]),
+    includes = [
+        ".",
+        "config",
+        "include",
+        "linux",
+    ],
+)
+
+ddk_headers(
+    name = "qcedev_local_headers",
+    hdrs = glob([
+        "include/uapi/linux/*.h",
+        "crypto-qti/*.h"
+    ]),
+    includes = ["include/uapi", "include/uapi/linux", "crypto-qti"]
+)
+
+ddk_headers(
+    name = "smmu_proxy_headers",
+    hdrs = glob([
+        "smmu-proxy/*.h",
+        "smmu-proxy/linux/*.h",
+        "smmu-proxy/include/uapi/linux/*.h"
+    ]),
+    includes = [".", "smmu-proxy"],
+)
+
+load(":build/pineapple.bzl", "define_pineapple")
+load(":build/anorak.bzl", "define_anorak")
+load(":build/blair.bzl", "define_blair")
+load(":build/sun.bzl", "define_sun")
+load(":build/niobe.bzl", "define_niobe")
+load(":build/monaco.bzl", "define_monaco")
+
+define_pineapple()
+define_anorak()
+define_blair()
+define_niobe()
+define_monaco()
+define_sun()

+ 58 - 0
qcom/opensource/securemsm-kernel/Kbuild

@@ -0,0 +1,58 @@
+LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \
+                -I$(SSG_MODULE_ROOT)/linux/ \
+                -I$(SSG_MODULE_ROOT)/include/linux/ \
+                -I$(SSG_MODULE_ROOT)/include/uapi/ \
+                -I$(SSG_MODULE_ROOT)/include/uapi/linux/
+
+ifneq ($(CONFIG_ARCH_QTI_VM), y)
+    LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h
+    include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf
+endif
+
+#Enable Qseecom if CONFIG_QTI_QUIN_GVM, CONFIG_ARCH_KHAJE, CONFIG_ARCH_SA8155, CONFIG_ARCH_BLAIR or CONFIG_ARCH_SA6155 is set to y
+ifneq (, $(filter y, $(CONFIG_QTI_QUIN_GVM) $(CONFIG_ARCH_KHAJE) $(CONFIG_ARCH_SA8155) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_SA6155)))
+    include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.conf
+    LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.h
+else
+    LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom_compat.h
+endif
+
+obj-$(CONFIG_QSEECOM) += qseecom_dlkm.o
+qseecom_dlkm-objs := qseecom/qseecom.o
+
+include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.conf
+LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.h
+
+obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o
+smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o
+
+obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o
+tz_log_dlkm-objs := tz_log/tz_log.o
+
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o
+qce50_dlkm-objs := crypto-qti/qce50.o
+
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o
+qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o
+
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o
+qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o
+
+obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o
+hdcp_qseecom_dlkm-objs := hdcp/hdcp_main.o hdcp/hdcp_smcinvoke.o hdcp/hdcp_qseecom.o
+
+obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o
+qrng_dlkm-objs := qrng/msm_rng.o
+
+ifneq (, $(filter y, $(ARCH_QTI_VM) $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_SUN) $(CONFIG_ARCH_NIOBE) $(CONFIG_ARCH_ANORAK)))
+    include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.conf
+    LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.h
+
+    obj-$(CONFIG_QTI_SMMU_PROXY) += smmu_proxy_dlkm.o
+    smmu_proxy_dlkm-objs := smmu-proxy/qti-smmu-proxy-common.o
+    ifneq ($(CONFIG_ARCH_QTI_VM), y)
+    smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-pvm.o
+    else
+    smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-tvm.o
+    endif
+endif

+ 12 - 0
qcom/opensource/securemsm-kernel/Makefile

@@ -0,0 +1,12 @@
+M=$(PWD)
+SSG_MODULE_ROOT=$(KERNEL_SRC)/$(M)
+INC=-I/$(M)/linux/*
+KBUILD_OPTIONS+=SSG_MODULE_ROOT=$(SSG_MODULE_ROOT)
+
+all: modules
+
+clean:
+	rm -f *.cmd *.d *.mod *.o *.ko *.mod.c *.mod.o Module.symvers modules.order
+
+%:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) $(INC) $@ $(KBUILD_OPTIONS)

+ 21 - 0
qcom/opensource/securemsm-kernel/build/anorak.bzl

@@ -0,0 +1,21 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_anorak():
+    define_consolidate_gki_modules(
+        target = "anorak",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "smmu_proxy_dlkm",
+            "qseecom_dlkm"
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE",
+            "CONFIG_QSEECOM_COMPAT",
+        ],
+    )

+ 21 - 0
qcom/opensource/securemsm-kernel/build/blair.bzl

@@ -0,0 +1,21 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_blair():
+    define_consolidate_gki_modules(
+        target = "blair",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "smmu_proxy_dlkm",
+            "qseecom_dlkm"
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE",
+            "CONFIG_QSEECOM",
+        ],
+    )

+ 20 - 0
qcom/opensource/securemsm-kernel/build/monaco.bzl

@@ -0,0 +1,20 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_monaco():
+    define_consolidate_gki_modules(
+        target = "monaco",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "qseecom_dlkm"
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE",
+	    "CONFIG_QSEECOM_COMPAT",
+        ],
+    )

+ 21 - 0
qcom/opensource/securemsm-kernel/build/niobe.bzl

@@ -0,0 +1,21 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_niobe():
+    define_consolidate_gki_modules(
+        target = "niobe",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "smmu_proxy_dlkm",
+            "qseecom_dlkm"
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE",
+	    "CONFIG_QSEECOM_COMPAT",
+        ],
+    )

+ 21 - 0
qcom/opensource/securemsm-kernel/build/pineapple.bzl

@@ -0,0 +1,21 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_pineapple():
+    define_consolidate_gki_modules(
+        target = "pineapple",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "smmu_proxy_dlkm",
+            "qseecom_dlkm"
+        ],
+        extra_options = [
+            "CONFIG_QCOM_SMCINVOKE",
+            "CONFIG_QSEECOM_COMPAT",
+        ],
+    )

+ 21 - 0
qcom/opensource/securemsm-kernel/build/sun.bzl

@@ -0,0 +1,21 @@
+load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
+
+def define_sun():
+    define_consolidate_gki_modules(
+        target = "sun",
+        modules = [
+            "smcinvoke_dlkm",
+            "tz_log_dlkm",
+            "qseecom_dlkm",
+            "hdcp_qseecom_dlkm",
+            "qce50_dlkm",
+            "qcedev-mod_dlkm",
+            "qrng_dlkm",
+            "qcrypto-msm_dlkm",
+            "smmu_proxy_dlkm"
+         ],
+         extra_options = [
+             "CONFIG_QCOM_SMCINVOKE",
+             "CONFIG_QSEECOM_COMPAT",
+         ],
+     )

+ 7 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig.conf

@@ -0,0 +1,7 @@
+export CONFIG_QTI_TZ_LOG=m
+export CONFIG_CRYPTO_DEV_QCEDEV=m
+export CONFIG_CRYPTO_DEV_QCRYPTO=m
+export CONFIG_HDCP_QSEECOM=m
+export CONFIG_HW_RANDOM_MSM_LEGACY=m
+export CONFIG_QSEECOM_PROXY=m
+export CONFIG_QSEECOM=m

+ 10 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig.h

@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QTI_TZ_LOG 1
+#define  CONFIG_CRYPTO_DEV_QCEDEV 1
+#define  CONFIG_CRYPTO_DEV_QCRYPTO 1
+#define  CONFIG_HDCP_QSEECOM 1
+#define  CONFIG_HW_RANDOM_MSM_LEGACY 1

+ 1 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom.conf

@@ -0,0 +1 @@
+export CONFIG_QTI_CRYPTO_FDE=m

+ 6 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QSEECOM 1

+ 6 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_qseecom_compat.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QSEECOM_COMPAT 1

+ 1 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smcinvoke.conf

@@ -0,0 +1 @@
+export CONFIG_QCOM_SMCINVOKE=m

+ 6 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smcinvoke.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QCOM_SMCINVOKE 1

+ 1 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smmu_proxy.conf

@@ -0,0 +1 @@
+export CONFIG_QTI_SMMU_PROXY=m

+ 6 - 0
qcom/opensource/securemsm-kernel/config/sec-kernel_defconfig_smmu_proxy.h

@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define  CONFIG_QTI_SMMU_PROXY 1

+ 39 - 0
qcom/opensource/securemsm-kernel/crypto-qti/fips_status.h

@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _FIPS_STATUS__H
+#define _FIPS_STATUS__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * fips_status: global FIPS140-2 status
+ * @FIPS140_STATUS_NA:
+ *					Not a FIPS140-2 compliant Build.
+ *					The flag status won't
+ *					change throughout
+ *					the lifetime
+ * @FIPS140_STATUS_PASS_CRYPTO:
+ *					KAT self tests are passed.
+ * @FIPS140_STATUS_QCRYPTO_ALLOWED:
+ *					Integrity test is passed.
+ * @FIPS140_STATUS_PASS:
+ *					All tests are passed and build
+ *					is in FIPS140-2 mode
+ * @FIPS140_STATUS_FAIL:
+ *					One of the tests failed.
+ *					This will block all requests
+ *					to crypto modules
+ */
+enum fips_status {
+		FIPS140_STATUS_NA				= 0,
+		FIPS140_STATUS_PASS_CRYPTO		= 1,
+		FIPS140_STATUS_QCRYPTO_ALLOWED	= 2,
+		FIPS140_STATUS_PASS				= 3,
+		FIPS140_STATUS_FAIL				= 0xFF
+};
+#endif /* _FIPS_STATUS__H */
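
fips_status.h above only defines the FIPS self-test status enum; the crypto-qti drivers added in this change are its intended consumers. As a minimal, hypothetical C sketch (not part of this commit), a user of the header could map the enum values to printable strings for logging. The helper name and strings below are illustrative only:

#include "fips_status.h"

/* Hypothetical helper: render enum fips_status for log messages. */
static const char *fips_status_name(enum fips_status s)
{
	switch (s) {
	case FIPS140_STATUS_NA:              return "not a FIPS140-2 build";
	case FIPS140_STATUS_PASS_CRYPTO:     return "KAT self-tests passed";
	case FIPS140_STATUS_QCRYPTO_ALLOWED: return "integrity test passed";
	case FIPS140_STATUS_PASS:            return "FIPS140-2 mode";
	case FIPS140_STATUS_FAIL:            return "self-test failed, crypto blocked";
	default:                             return "unknown";
	}
}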

+ 999 - 0
qcom/opensource/securemsm-kernel/crypto-qti/ota_crypto.c

@@ -0,0 +1,999 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI Over the Air (OTA) Crypto driver
+ *
+ * Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+#include <linux/version.h>
+
+#include "linux/qcota.h"
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_VAR_MPKT_F8_OPER = 3,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head rlist;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+		struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
+	} req;
+	unsigned int steps;
+	struct ota_qce_dev  *pqce;
+};
+
+/*
+ * Register ourselves as a char device /dev/qcota0 to be able to access the ota
+ * from userspace.
+ */
+
+
+#define QCOTA_DEV	"qcota0"
+
+
+struct ota_dev_control {
+
+	/* char device */
+	struct cdev cdev;
+	int minor;
+	struct list_head ready_commands;
+	unsigned int magic;
+	struct list_head qce_dev;
+	spinlock_t lock;
+	struct mutex register_lock;
+	bool registered;
+	uint32_t total_units;
+};
+
+struct ota_qce_dev {
+	struct list_head qlist;
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	struct ota_async_req *active_command;
+	struct tasklet_struct done_tasklet;
+	struct ota_dev_control *podev;
+	uint32_t unit;
+	u64 total_req;
+	u64 err_req;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+	.magic = OTA_MAGIC,
+};
+
+static dev_t qcota_device_no;
+static struct class *driver_class;
+static struct device *class_dev;
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u64 f8_req;
+	u64 f8_mp_req;
+	u64 f8_v_mp_req;
+	u64 f9_req;
+	u64 f8_op_success;
+	u64 f8_op_fail;
+	u64 f8_mp_op_success;
+	u64 f8_mp_op_fail;
+	u64 f8_v_mp_op_success;
+	u64 f8_v_mp_op_fail;
+	u64 f9_op_success;
+	u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+
+	return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_control();
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static bool  _next_v_mp_req(struct ota_async_req *areq)
+{
+	unsigned char *p;
+
+	if (areq->err)
+		return false;
+	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+		return false;
+
+	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_len =
+		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+	return true;
+}
+
+static void req_done(unsigned long data)
+{
+	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+	struct ota_dev_control *podev = pqce->podev;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+	bool schedule = true;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = pqce->active_command;
+	if (unlikely(areq == NULL))
+		pr_err("ota_crypto: %s, no active request\n", __func__);
+	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+		if (_next_v_mp_req(areq)) {
+			/* execute next subcommand */
+			spin_unlock_irqrestore(&podev->lock, flags);
+			ret = start_req(pqce, areq);
+			if (unlikely(ret)) {
+				areq->err = ret;
+				schedule = true;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				areq = NULL;
+				schedule = false;
+			}
+		} else {
+			/* done with this variable mp req */
+			schedule = true;
+		}
+	}
+	while (schedule) {
+		if (!list_empty(&podev->ready_commands)) {
+			new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, rlist);
+			list_del(&new_req->rlist);
+			pqce->active_command = new_req;
+			spin_unlock_irqrestore(&podev->lock, flags);
+
+			if (new_req) {
+				new_req->err = 0;
+				/* start a new request */
+				ret = start_req(pqce, new_req);
+			}
+			if (unlikely(new_req && ret)) {
+				new_req->err = ret;
+				complete(&new_req->complete);
+				ret = 0;
+				new_req = NULL;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				schedule = false;
+			}
+		} else {
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+			schedule = false;
+		}
+	}
+	if (areq)
+		complete(&areq->complete);
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+	areq->req.f9_req.mac_i  = *((uint32_t *)icv);
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else
+		areq->err = 0;
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else {
+		areq->err = 0;
+	}
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* command should be on the podev->active_command */
+	areq->pqce = pqce;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret =  qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+		break;
+
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+		pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	areq->err = ret;
+	pqce->total_req++;
+	if (ret)
+		pqce->err_req++;
+	return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+	/* do this function with spinlock set */
+	struct ota_qce_dev *p;
+
+	if (unlikely(list_empty(&podev->qce_dev))) {
+		pr_err("%s: no valid qce to schedule\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		if (p->active_command == NULL)
+			return p;
+	}
+	return NULL;
+}
+
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+	struct ota_qce_dev *pqce;
+
+	areq->err = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	pqce = schedule_qce(podev);
+	if (pqce) {
+		pqce->active_command = areq;
+		spin_unlock_irqrestore(&podev->lock, flags);
+
+		ret = start_req(pqce, areq);
+		if (ret != 0) {
+			spin_lock_irqsave(&podev->lock, flags);
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+		}
+
+	} else {
+		list_add_tail(&areq->rlist, &podev->ready_commands);
+		spin_unlock_irqrestore(&podev->lock, flags);
+	}
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat;
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+	default:
+		if (areq->err)
+			pstat->f8_v_mp_op_fail++;
+		else
+			pstat->f8_v_mp_op_success++;
+		break;
+	}
+
+	return areq->err;
+}
+
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total, temp;
+	struct qcota_stat *pstat;
+	int i;
+	uint8_t *p = NULL;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat;
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		if (areq.req.f9_req.msize == 0)
+			return 0;
+
+		k_buf = memdup_user((const void __user *)user_src,
+					areq.req.f9_req.msize);
+		if (IS_ERR(k_buf))
+			return -EFAULT;
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+
+		}
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+		if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+				 areq.req.f8_mp_req.cipher_size)
+			return -EINVAL;
+		total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		/* k_buf should be cache line aligned */
+		k_buf = memdup_user((const void __user *)user_src, total);
+		if (IS_ERR(k_buf))
+			return -EFAULT;
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_V_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qce_f8_variable_multi_pkt_req)))
+			return -EFAULT;
+		if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+				sizeof(struct qce_f8_variable_multi_pkt_req)))
+			return -EFAULT;
+
+		if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+			return -EINVAL;
+
+		for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			if (!access_ok(VERIFY_WRITE, (void __user *)
+				areq.req.f8_v_mp_req.cipher_iov[i].addr,
+				areq.req.f8_v_mp_req.cipher_iov[i].size))
+				return -EFAULT;
+			total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			total = ALIGN(total, L1_CACHE_BYTES);
+		}
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_src =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (copy_from_user(p, (void __user *)user_src,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+
+		areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_len =
+			areq.req.f8_v_mp_req.cipher_iov[0].size;
+		areq.steps = 0;
+		areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+		pstat->f8_v_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err != 0) {
+			kfree(k_buf);
+			return err;
+		}
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_dst =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (copy_to_user(user_dst, p,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+		kfree(k_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	podev = &qcota_dev;
+	pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+	if (!pqce)
+		return -ENOMEM;
+
+	rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	driver_class = class_create(QCOTA_DEV);
+#else
+	driver_class = class_create(THIS_MODULE, QCOTA_DEV);
+#endif
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
+		QCOTA_DEV);
+	if (IS_ERR(class_dev)) {
+		pr_err("class_device_create failed %d\n", rc);
+		rc = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&podev->cdev, &qcota_fops);
+	podev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+	podev->minor = 0;
+
+	pqce->podev = podev;
+	pqce->active_command = NULL;
+	tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device %s, can not open qce\n",
+			__func__, pdev->name);
+		goto exit_del_cdev;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					!ce_support.ota) {
+		pr_err("%s: device %s, qce does not support ota capability\n",
+			__func__, pdev->name);
+		rc = -ENODEV;
+		goto err;
+	}
+	pqce->qce = handle;
+	pqce->pdev = pdev;
+	pqce->total_req = 0;
+	pqce->err_req = 0;
+	platform_set_drvdata(pdev, pqce);
+
+	mutex_lock(&podev->register_lock);
+	rc = 0;
+	if (!podev->registered) {
+		if (rc == 0) {
+			pqce->unit = podev->total_units;
+			podev->total_units++;
+			podev->registered = true;
+		}
+	} else {
+		pqce->unit = podev->total_units;
+		podev->total_units++;
+	}
+	mutex_unlock(&podev->register_lock);
+	if (rc) {
+		pr_err("ion: failed to register misc device.\n");
+		goto err;
+	}
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_add_tail(&pqce->qlist, &podev->qce_dev);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+
+	platform_set_drvdata(pdev, NULL);
+	tasklet_kill(&pqce->done_tasklet);
+
+exit_del_cdev:
+	cdev_del(&podev->cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qcota_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qcota_device_no, 1);
+
+	kfree(pqce);
+	return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	pqce = platform_get_drvdata(pdev);
+	if (!pqce)
+		return 0;
+	if (pqce->qce)
+		qce_close(pqce->qce);
+
+	podev = pqce->podev;
+	if (!podev)
+		goto ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_del(&pqce->qlist);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	mutex_lock(&podev->register_lock);
+	if (--podev->total_units == 0) {
+		cdev_del(&podev->cdev);
+		device_destroy(driver_class, qcota_device_no);
+		class_destroy(driver_class);
+		unregister_chrdev_region(qcota_device_no, 1);
+		podev->registered = false;
+	}
+	mutex_unlock(&podev->register_lock);
+ret:
+
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return 0;
+}
+
+static const struct of_device_id qcota_match[] = {
+	{	.compatible = "qcom,qcota",
+	},
+	{}
+};
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.of_match_table = qcota_match,
+	},
+};
+
+static int _disp_stats(void)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	pstat = &_qcota_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI OTA crypto accelerator Statistics:\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request                      : %llu\n",
+					pstat->f8_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success            : %llu\n",
+					pstat->f8_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail               : %llu\n",
+					pstat->f8_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request                   : %llu\n",
+					pstat->f8_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success         : %llu\n",
+					pstat->f8_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail            : %llu\n",
+					pstat->f8_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP request          : %llu\n",
+					pstat->f8_v_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation success: %llu\n",
+					pstat->f8_v_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation fail   : %llu\n",
+					pstat->f8_v_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request                      : %llu\n",
+					pstat->f9_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success            : %llu\n",
+					pstat->f9_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail               : %llu\n",
+					pstat->f9_op_fail);
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req                 : %llu\n",
+			p->unit,
+			p->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error           : %llu\n",
+			p->unit,
+			p->err_req
+		);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return len;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int len;
+
+	len = _disp_stats();
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		p->total_req = 0;
+		p->err_req = 0;
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         simple_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+	_debug_qcota = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcota debugfs_create_file fail, error %ld\n",
+					PTR_ERR(dent));
+		rc = PTR_ERR(dent);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+	struct ota_dev_control *podev;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+
+	podev = &qcota_dev;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	INIT_LIST_HEAD(&podev->qce_dev);
+	spin_lock_init(&podev->lock);
+	mutex_init(&podev->register_lock);
+	podev->registered = false;
+	podev->total_units = 0;
+
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Ota Crypto driver");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
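
ota_crypto.c exposes the OTA F8/F9 engine to userspace through the character device /dev/qcota0. Its ioctl commands (QCOTA_F9_REQ, QCOTA_F8_REQ, QCOTA_F8_MPKT_REQ, QCOTA_F8_V_MPKT_REQ) copy a request structure in, run it on the crypto engine, and copy the result back. Below is a minimal, hypothetical userspace sketch for the F9 (integrity) path. It assumes the UAPI header include/uapi/linux/qcota.h added by this change is installed as <linux/qcota.h>; only the message, msize and mac_i fields visible in the driver above are filled in, and the remaining key/count/direction fields defined in that header would also have to be set for a real request:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qcota.h>	/* assumed install path of the UAPI header */

int main(void)
{
	struct qce_f9_req req;
	unsigned char msg[64] = { 0 };	/* data to authenticate */
	int fd = open("/dev/qcota0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.message = msg;
	req.msize = sizeof(msg);	/* msize == 0 is treated as a no-op by the driver */
	/* ...key, count and direction fields from qcota.h would go here... */

	if (ioctl(fd, QCOTA_F9_REQ, &req) == 0)
		printf("MAC-I: 0x%08x\n", (unsigned int)req.mac_i);	/* written back by the driver */

	close(fd);
	return 0;
}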

+ 224 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qce.h

@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QTI Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+#include <crypto/skcipher.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size  in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+#define AES_CE_BLOCK_SIZE		16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH                   8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA		0xFF00
+
+/* Maximum Nonce bytes  */
+#define MAX_NONCE  16
+
+/* Crypto clock control flags */
+#define QCE_CLK_ENABLE_FIRST		1
+#define QCE_BW_REQUEST_FIRST		2
+#define QCE_CLK_DISABLE_FIRST		3
+#define QCE_BW_REQUEST_RESET_FIRST	4
+
+/* default average and peak bw for crypto device */
+#define CRYPTO_AVG_BW			384
+#define CRYPTO_PEAK_BW			384
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Offload operation type */
+enum qce_offload_op_enum {
+	QCE_OFFLOAD_NONE = 0, /* kernel pipe */
+	QCE_OFFLOAD_HLOS_HLOS = 1,
+	QCE_OFFLOAD_HLOS_HLOS_1 = 2,
+	QCE_OFFLOAD_HLOS_CPB = 3,
+	QCE_OFFLOAD_HLOS_CPB_1 = 4,
+	QCE_OFFLOAD_CPB_HLOS = 5,
+	QCE_OFFLOAD_OPER_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* Supports 20 bytes of HMAC key */
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+	bool aligned_only;
+	bool bam;
+	bool is_shared;
+	bool hw_key;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	bool clk_mgmt_sus_res;
+	bool req_bw_before_clk;
+	unsigned int ce_device;
+	unsigned int ce_hw_instance;
+	unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest  */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+	unsigned int  flags;
+	int current_req_info;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* call back */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode  */
+	enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+	unsigned char *authkey;		/* authentication key  */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication (MAC/tag) size */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size*/
+	unsigned int iv_ctr_size;	/* iv increment counter size*/
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
+	unsigned int  flags;
+	enum qce_offload_op_enum offload_op;	/* Offload usecase */
+	bool is_pattern_valid;		/* Is pattern setting required */
+	unsigned int pattern_info;	/* Pattern info for offload operation */
+	unsigned int block_offset;	/* partial first block for AES CTR */
+	bool is_copy_op;		/* copy buffers without crypto ops */
+	int current_req_info;
+};
+
+struct qce_pm_table {
+	int (*suspend)(void *handle);
+	int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
+struct qce_error {
+	bool no_error;
+	bool timer_error;
+	bool key_paused;
+	bool generic_error;
+};
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
+void qce_dump_req(void *handle);
+void qce_get_crypto_status(void *handle, struct qce_error *error);
+int qce_manage_timeout(void *handle, int req_info);
+int qce_set_irqs(void *handle, bool enable);
+#endif /* __CRYPTO_MSM_QCE_H */
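The declarations above form the whole contract between the CE core (qce50.c, added next) and its in-kernel clients such as qcedev, qcrypto and the OTA driver. Below is a minimal sketch of how a client could drive a single-shot SHA-256 digest through this API; the platform device, scatterlist and completion handling are illustrative assumptions rather than code from this patch.

/* Minimal sketch of a qce client; 'pdev' is assumed to be the CE device. */
static void example_sha_done(void *cookie, unsigned char *icv,
			     unsigned char *iv, int ret)
{
	/* complete(cookie) or similar; 'ret' carries the engine status */
}

static int example_sha256(struct platform_device *pdev,
			  struct scatterlist *sg, unsigned int len,
			  unsigned char digest[SHA256_DIGESTSIZE])
{
	struct qce_sha_req sreq = {};
	int rc = 0;
	void *handle = qce_open(pdev, &rc);	/* bind to one CE instance */

	if (!handle)
		return rc;

	sreq.qce_cb = example_sha_done;	/* called when the transfer completes */
	sreq.alg = QCE_HASH_SHA256;
	sreq.digest = digest;		/* engine writes the digest here */
	sreq.src = sg;
	sreq.size = len;
	sreq.first_blk = true;		/* single shot: first and last block */
	sreq.last_blk = true;

	rc = qce_process_sha_req(handle, &sreq);
	/* a real client waits for example_sha_done() before qce_close() */
	qce_close(handle);
	return rc;
}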

+ 6823 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qce50.c

@@ -0,0 +1,6823 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha1.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/iommu.h>
+
+#include "qcrypto.h"
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_SMMU_IOVA_START 0x10000000
+#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR    0x200
+#define QCE_SECTOR_SIZE	    0x200
+#define CE_CLK_100MHZ	100000000
+#define CE_CLK_DIV	1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+	struct list_head qlist;
+	unsigned long handle;
+	uint32_t cnt;
+	uint32_t bam_mem;
+	void __iomem *bam_iobase;
+	bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Used to determine the mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Max number of request supported */
+#define MAX_QCE_BAM_REQ 8
+/* Interrupt flag will be set for every SET_INTR_AT_REQ request */
+#define SET_INTR_AT_REQ			(MAX_QCE_BAM_REQ / 2)
+/* To create extra request space to hold dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ	(MAX_QCE_BAM_REQ + 1)
+/* Allocate the memory for MAX_QCE_BAM_REQ  + 1 (for dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ		MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay timer to expire when in bunch mode */
+#define DELAY_IN_JIFFIES 5
+/* Index to point the dummy request */
+#define DUMMY_REQ_INDEX			MAX_QCE_BAM_REQ
+
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
+#define AES_CTR_IV_CTR_SIZE	64
+
+#define QCE_NO_ERROR_VAL1	0x2000006
+#define QCE_NO_ERROR_VAL2	0x2000004
+
+// Crypto Engines 5.7 and below
+// Key timer expiry for pipes 1-15 (Status3)
+#define CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS3	0x0000FF00
+// Key timer expiry for pipes 16-19 (Status6)
+#define CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS6	0x00000300
+// Key pause for pipes 1-15 (Status3)
+#define CRYPTO5_LEGACY_KEY_PAUSE_STATUS3		0xFF000000
+// Key pause for pipes 16-19 (Status6)
+#define CRYPTO5_LEGACY_KEY_PAUSE_STATUS6		0x3000000
+
+// Crypto Engines 5.8 and above
+// Key timer expiry for all pipes (Status3)
+#define CRYPTO58_TIMER_EXPIRED		0x00000010
+// Key pause for all pipes (Status3)
+#define CRYPTO58_KEY_PAUSE			0x00001000
+// Key index for Status3 (Timer and Key Pause)
+#define KEY_INDEX_SHIFT				16
+
+enum qce_owner {
+	QCE_OWNER_NONE   = 0,
+	QCE_OWNER_CLIENT = 1,
+	QCE_OWNER_TIMEOUT = 2
+};
+
+struct dummy_request {
+	struct qce_sha_req sreq;
+	struct scatterlist sg;
+	struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	struct bam_registration_info *pbam;
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+	unsigned char *iovec_vmem;  /* Allocate iovec virtual memory */
+	int iovec_memsize;				/* Memory allocated */
+	uint32_t bam_mem;		/* bam physical address, from DT */
+	uint32_t bam_mem_size;		/* bam io size, from DT */
+	int is_shared;			/* CE HW is shared */
+	bool support_cmd_dscr;
+	bool support_hw_key;
+	bool support_clk_mgmt_sus_res;
+	bool support_only_core_src_clk;
+	bool request_bw_before_clk;
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+	struct clk *ce_bus_clk;	/* Handle to CE AXI clk*/
+	bool no_get_around;
+	bool no_ccm_mac_status_get_around;
+	unsigned int ce_opp_freq_hz;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	uint32_t engines_avail;
+	struct qce_ce_cfg_reg_setting reg;
+	struct ce_bam_info ce_bam_info;
+	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+	unsigned int ce_request_index;
+	enum qce_owner owner;
+	atomic_t no_of_queued_req;
+	struct timer_list timer;
+	struct dummy_request dummyreq;
+	unsigned int mode;
+	unsigned int intr_cadence;
+	unsigned int dev_no;
+	struct qce_driver_stats qce_stats;
+	atomic_t bunch_cmd_seq;
+	atomic_t last_intr_seq;
+	bool cadence_flag;
+	uint8_t *dummyreq_in_buf;
+	struct dma_iommu_mapping *smmu_mapping;
+	bool enable_s1_smmu;
+	bool no_clock_support;
+	bool kernel_pipes_support;
+	bool offload_pipes_support;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
+/*
+ * Requests for offload operations do not require explicit dma operations
+ * as they already have SMMU mapped source/destination buffers.
+ */
+static bool is_offload_op(int op)
+{
+	return (op == QCE_OFFLOAD_HLOS_HLOS || op == QCE_OFFLOAD_HLOS_HLOS_1 ||
+		op == QCE_OFFLOAD_CPB_HLOS || op == QCE_OFFLOAD_HLOS_CPB ||
+		op == QCE_OFFLOAD_HLOS_CPB_1);
+}
+
+static uint32_t qce_get_config_be(struct qce_device *pce_dev,
+				   uint32_t pipe_pair)
+{
+	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+
+	return (beats << CRYPTO_REQ_SIZE |
+		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+		BIT(CRYPTO_MASK_OP_DONE_INTR) | 0 << CRYPTO_HIGH_SPD_EN_N |
+		pipe_pair << CRYPTO_PIPE_SET_SELECT);
+}
+
+static void dump_status_regs(unsigned int *status)
+{
+	pr_info("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, status[0]);
+	pr_info("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, status[1]);
+	pr_info("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, status[2]);
+	pr_info("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, status[3]);
+	pr_info("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, status[4]);
+	pr_info("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, status[5]);
+}
+
+void qce_get_crypto_status(void *handle, struct qce_error *error)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	unsigned int status[6] = {0};
+
+	status[0] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	status[1] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS2_REG);
+	status[2] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS3_REG);
+	status[3] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS4_REG);
+	status[4] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS5_REG);
+	status[5] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS6_REG);
+
+#ifdef QCE_DEBUG
+	dump_status_regs(status);
+#endif
+
+	if (status[0] != QCE_NO_ERROR_VAL1 && status[0] != QCE_NO_ERROR_VAL2) {
+		if (pce_dev->ce_bam_info.minor_version >= 8) {
+			if (status[2] & CRYPTO58_TIMER_EXPIRED) {
+				error->timer_error = true;
+				pr_err("%s: timer expired, index = 0x%x\n",
+					__func__, (status[2] >> KEY_INDEX_SHIFT));
+			} else if (status[2] & CRYPTO58_KEY_PAUSE) {
+				error->key_paused = true;
+				pr_err("%s: key paused, index = 0x%x\n",
+					__func__, (status[2] >> KEY_INDEX_SHIFT));
+			} else {
+				pr_err("%s: generic error, refer all status\n",
+					__func__);
+				error->generic_error = true;
+			}
+		} else {
+			if ((status[2] & CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS3) ||
+				(status[5] & CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS6)) {
+				error->timer_error = true;
+				pr_err("%s: timer expired, refer status 3 and 6\n",
+					__func__);
+			} else if ((status[2] & CRYPTO5_LEGACY_KEY_PAUSE_STATUS3) ||
+					(status[5] & CRYPTO5_LEGACY_KEY_PAUSE_STATUS6)) {
+				error->key_paused = true;
+				pr_err("%s: key paused, refer status 3 and 6\n",
+					__func__);
+			} else {
+				pr_err("%s: generic error, refer all status\n",
+					__func__);
+				error->generic_error = true;
+			}
+		}
+		dump_status_regs(status);
+		return;
+	}
+
+	error->no_error = true;
+	pr_info("%s: No crypto error, status1 = 0x%x\n",
+		   __func__, status[0]);
+
+	return;
+}
+EXPORT_SYMBOL(qce_get_crypto_status);
+
+static int qce_crypto_config(struct qce_device *pce_dev,
+		enum qce_offload_op_enum offload_op)
+{
+	uint32_t config_be = 0;
+
+	config_be = qce_get_config_be(pce_dev,
+		    pce_dev->ce_bam_info.pipe_pair_index[offload_op]);
+
+	pce_dev->reg.crypto_cfg_be = config_be;
+	pce_dev->reg.crypto_cfg_le = (config_be |
+					CRYPTO_LITTLE_ENDIAN_MASK);
+	return 0;
+}
+
+static void qce_enable_clock_gating(struct qce_device *pce_dev)
+{
+	/* This feature might cause some HW issues, noop till resolved. */
+	return;
+}
+
+/*
+ * The IV counter mask is set based on the values sent through the offload
+ * ioctl calls. Currently, offload operations use a 64-bit mask for AES CTR
+ * and a 128-bit mask for AES CBC.
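+ *
+ * Concretely: iv_ctr_size == AES_CTR_IV_CTR_SIZE (64) programs only two of
+ * the four CNTR_MASK words to all-ones (a 64-bit mask); any other size
+ * programs all four words, i.e. the 128-bit mask used for AES CBC offload.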
+ */
+static void qce_set_iv_ctr_mask(struct qce_device *pce_dev,
+				struct qce_req *creq)
+{
+	if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) {
+		pce_dev->reg.encr_cntr_mask_0 = 0x0;
+		pce_dev->reg.encr_cntr_mask_1 = 0x0;
+		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
+		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
+	} else {
+		pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
+		pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
+		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
+		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
+	}
+
+	return;
+}
+
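+/*
+ * Packs a byte stream into big-endian 32-bit words for the CE registers.
+ * Illustration: {0x01, 0x02, 0x03, 0x04, 0x05} becomes 0x01020304 followed
+ * by 0x05000000 -- any trailing partial word is left-justified and the
+ * remaining byte lanes are zero.
+ */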
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_map_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_unmap_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+	int i = 0;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before checking the version.
+	 */
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+
+	/*
+	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
+	 * above. That allows a single sps transfer of consumer
+	 * pipe, and a single sps transfer of producer pipe
+	 * for a crypto request. no_get_around flag indicates this.
+	 *
+	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
+	 * fixed. no_ccm_mac_status_get_around flag indicates this.
+	 */
+	pce_dev->no_get_around = (min_rev >=
+			CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
+	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+			 (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else
+		pce_dev->no_ccm_mac_status_get_around = false;
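+	/*
+	 * Net effect of the checks above: below 5.3.0 both flags stay false,
+	 * 5.3.0 sets only no_get_around, and 5.3.1 or later (including 5.4+)
+	 * sets both no_get_around and no_ccm_mac_status_get_around.
+	 */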
+
+	pce_dev->ce_bam_info.minor_version = min_rev;
+	pce_dev->ce_bam_info.major_version = maj_rev;
+
+	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+					CRYPTO_ENGINES_AVAIL);
+	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK, IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
+			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
+			pce_dev->ce_bam_info.bam_iobase,
+			pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		dev_dbg(pce_dev->pdev, "Consumer pipe IN [%d] = %d, Producer Pipe OUT [%d] = %d\n",
+				i, pce_dev->ce_bam_info.src_pipe_index[i],
+				i, pce_dev->ce_bam_info.dest_pipe_index[i]);
+	}
+
+	return 0;
+};
+
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_sha_req *sreq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		return &cmdlistptr->auth_sha1;
+	case QCE_HASH_SHA256:
+		return &cmdlistptr->auth_sha256;
+	case QCE_HASH_SHA1_HMAC:
+		return &cmdlistptr->auth_sha1_hmac;
+	case QCE_HASH_SHA256_HMAC:
+		return &cmdlistptr->auth_sha256_hmac;
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			return &cmdlistptr->auth_aes_128_cmac;
+		return &cmdlistptr->auth_aes_256_cmac;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	uint32_t auth_cfg;
+
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+
+	pce = cmdlistinfo->crypto_cfg;
+	pce->data = pce_dev->reg.crypto_cfg_be;
+
+	pce = cmdlistinfo->crypto_cfg_le;
+	pce->data = pce_dev->reg.crypto_cfg_le;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+
+		/* no more check for null key. use flag */
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+						== QCRYPTO_CTX_USE_HW_KEY)
+			use_hw_key = true;
+		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+		pce = cmdlistinfo->go_proc;
+		if (use_hw_key) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			if (!use_pipe_key) {
+				_byte_stream_to_net_words(mackey32,
+						sreq->authkey,
+						sreq->authklen);
+				for (i = 0; i < authk_size_in_word; i++, pce++)
+					pce->data = mackey32[i];
+			}
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* Set/reset  last bit in CFG register  */
+	pce = cmdlistinfo->auth_seg_cfg;
+	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+				1 << CRYPTO_FIRST |
+				1 << CRYPTO_USE_PIPE_KEY_AUTH |
+				1 << CRYPTO_USE_HW_KEY_AUTH);
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+	pce->data = auth_cfg;
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	pce = cmdlistinfo->encr_seg_cfg;
+	pce->data = 0;
+
+	/* write auth seg size start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+
+	/* always ensure there is input data. ZLT does not work for bam-ndp */
+	if (sreq->size)
+		pce->data = sreq->size;
+	else
+		pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_AES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+		if (creq->encklen ==  AES128_KEY_SIZE) {
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return
+				&cmdlistptr->aead_hmac_sha1_cbc_aes_128;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return
+				&cmdlistptr->aead_hmac_sha256_cbc_aes_128;
+			else
+				return NULL;
+		} else if (creq->encklen ==  AES256_KEY_SIZE) {
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_aes_256;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return
+				&cmdlistptr->aead_hmac_sha256_cbc_aes_256;
+			else
+				return NULL;
+		} else
+			return NULL;
+		break;
+		default:
+			return NULL;
+		}
+		break;
+
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	struct sps_command_element *pce;
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = q_req->ivsize;
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	if (qce_crypto_config(pce_dev, q_req->offload_op))
+		return -EINVAL;
+
+	pce = cmdlistinfo->crypto_cfg;
+	pce->data = pce_dev->reg.crypto_cfg_be;
+
+	pce = cmdlistinfo->crypto_cfg_le;
+	pce->data = pce_dev->reg.crypto_cfg_le;
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_3DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_AES:
+		if ((key_size != AES128_KEY_SIZE) &&
+				(key_size != AES256_KEY_SIZE))
+			return -EINVAL;
+		enciv_in_word = 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* only support cbc mode */
+	if (q_req->mode != QCE_MODE_CBC)
+		return -EINVAL;
+
+	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+	pce = cmdlistinfo->encr_cntr_iv;
+	for (i = 0; i < enciv_in_word; i++, pce++)
+		pce->data = enciv32[i];
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < enck_size_in_word; i++, pce++)
+		pce->data = enckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	encr_cfg = pce->data;
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	else
+		encr_cfg &= ~(1 << CRYPTO_ENCODE);
+	pce->data = encr_cfg;
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		for (i = 0; i < 5; i++, pce++)
+			pce->data = _std_init_vector_sha1[i];
+	else
+		for (i = 0; i < 8; i++, pce++)
+			pce->data = _std_init_vector_sha256[i];
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = 0;
+
+	pce = cmdlistinfo->auth_seg_cfg;
+	a_cfg = pce->data;
+	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+	pce->data = a_cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = totallen_in;
+
+	/* write auth seg size start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = q_req->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	return 0;
+
+}
+
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_des_ecb;
+			return &cmdlistptr->cipher_des_cbc;
+		case CIPHER_ALG_3DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_3des_ecb;
+			return &cmdlistptr->cipher_3des_cbc;
+		default:
+			return NULL;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_ecb;
+			return &cmdlistptr->cipher_aes_256_ecb;
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_cbc_ctr;
+			return &cmdlistptr->cipher_aes_256_cbc_ctr;
+		case QCE_MODE_XTS:
+			if (creq->encklen/2 == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_xts;
+			return &cmdlistptr->cipher_aes_256_xts;
+		case QCE_MODE_CCM:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->aead_aes_128_ccm;
+			return &cmdlistptr->aead_aes_256_ccm;
+		default:
+			return NULL;
+		}
+	}
+	return NULL;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+	bool is_des_cipher = false;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if (qce_crypto_config(pce_dev, creq->offload_op))
+		return -EINVAL;
+
+	pce = cmdlistinfo->crypto_cfg;
+	pce->data = pce_dev->reg.crypto_cfg_be;
+
+	pce = cmdlistinfo->crypto_cfg_le;
+	pce->data = pce_dev->reg.crypto_cfg_le;
+
+	pce = cmdlistinfo->go_proc;
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	if (use_hw_key)
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+						pce_dev->phy_iobase);
+	else
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	if (!use_pipe_key && !use_hw_key) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		}
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data = totallen_in;
+		else
+			pce->data = totallen_in - creq->authsize;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	} else {
+		if (creq->op != QCE_REQ_AEAD) {
+			pce = cmdlistinfo->auth_seg_cfg;
+			pce->data = 0;
+		}
+	}
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (ivsize > MAX_IV_LENGTH) {
+				pr_err("%s: error: Invalid length parameter\n",
+					 __func__);
+				return -EINVAL;
+			}
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (!use_hw_key) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		is_des_cipher = true;
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (ivsize > MAX_IV_LENGTH) {
+				pr_err("%s: error: Invalid length parameter\n",
+					 __func__);
+				return -EINVAL;
+			}
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (!use_hw_key) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		is_des_cipher = true;
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if (!use_hw_key && !use_pipe_key) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				pce = cmdlistinfo->encr_xts_key;
+				for (i = 0; i < xtsklen; i++, pce++)
+					pce->data = xtskey32[i];
+			}
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+						creq->cryptlen);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				pce->data =
+					min((unsigned int)QCE_SECTOR_SIZE * 2,
+					creq->cryptlen);
+				break;
+			default:
+				pce->data = creq->cryptlen;
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (ivsize > MAX_IV_LENGTH) {
+				pr_err("%s: error: Invalid length parameter\n",
+					 __func__);
+				return -EINVAL;
+			}
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if (!use_hw_key) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->mode)  */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	}  else	{
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+
+	if (use_hw_key)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if (creq->is_copy_op) {
+		pce->data = 0;
+	} else {
+		if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+			pce->data = (creq->cryptlen + creq->authsize);
+		else
+			pce->data = creq->cryptlen;
+	}
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	if (!is_des_cipher) {
+		/* pattern info */
+		pce = cmdlistinfo->pattern_info;
+		pce->data = creq->pattern_info;
+
+		/* block offset */
+		pce = cmdlistinfo->block_offset;
+		pce->data = (creq->block_offset << 4) |
+				(creq->block_offset ? 1: 0);
+
+		/* IV counter size */
+		qce_set_iv_ctr_mask(pce_dev, creq);
+
+		pce = cmdlistinfo->encr_mask_3;
+		pce->data = pce_dev->reg.encr_cntr_mask_3;
+		pce = cmdlistinfo->encr_mask_2;
+		pce->data = pce_dev->reg.encr_cntr_mask_2;
+		pce = cmdlistinfo->encr_mask_1;
+		pce->data = pce_dev->reg.encr_cntr_mask_1;
+		pce = cmdlistinfo->encr_mask_0;
+		pce->data = pce_dev->reg.encr_cntr_mask_0;
+	}
+
+	pce = cmdlistinfo->go_proc;
+	pce->data = 0;
+	if (is_offload_op(creq->offload_op))
+		pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT));
+	else
+		pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) |
+				(1 << CRYPTO_RESULTS_DUMP));
+
+
+	return 0;
+}
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+
+	pce = cmdlistinfo->crypto_cfg;
+	pce->data = pce_dev->reg.crypto_cfg_be;
+
+	pce = cmdlistinfo->crypto_cfg_le;
+	pce->data = pce_dev->reg.crypto_cfg_le;
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ikey32[i];
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	pce->data = req->last_bits;
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	pce = cmdlistinfo->auth_bytecount;
+	pce->data = req->fresh;
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	pce++;
+	pce->data = req->count_i;
+
+	/* write auth seg cfg */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F9_DIRECTION);
+	pce->data = cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = req->msize;
+
+	/* write auth seg start*/
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->msize;
+
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+	return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+
+	pce = cmdlistinfo->crypto_cfg;
+	pce->data = pce_dev->reg.crypto_cfg_be;
+
+	pce = cmdlistinfo->crypto_cfg_le;
+	pce->data = pce_dev->reg.crypto_cfg_le;
+
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if (key_stream_mode)
+		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F8_DIRECTION);
+	pce->data = cfg;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (cipher_offset & 0xffff);
+
+	/* write encr seg size  */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = cipher_size;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->data_len;
+
+	/* write cntr0_iv0 for countC */
+	pce = cmdlistinfo->encr_cntr_iv;
+	pce->data = req->count_c;
+	/* write cntr1_iv1 for nPkts, and bearer */
+	pce++;
+	if (npkts == 1)
+		npkts = 0;
+	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+	return 0;
+}
+
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+	int i, j, ents;
+	struct ce_sps_data *pce_sps_data;
+	struct sps_iovec *iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	iovec = pce_sps_data->in_transfer.iovec;
+	pr_info("==============================================\n");
+	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	for (i = 0; i <  pce_sps_data->in_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				pr_info("      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	pr_info("==============================================\n");
+	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	iovec =  pce_sps_data->out_transfer.iovec;
+	for (i = 0; i <   pce_sps_data->out_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+	_qce_dump_descr_fifos(pce_dev, req_info);
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+{									\
+	pr_info("      [0x%pK] 0x%x\n", addr, (uint32_t)val);		\
+	writel_relaxed(val, addr);					\
+}
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+	writel_relaxed(val, addr)
+
+#endif
+
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	bool sha1 = false;
+	uint32_t auth_cfg = 0;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to set the other config registers.
+	 * This ensures the configuration is applied with the correct
+	 * endianness, as set in the CONFIG register.
+	 */
+	mb();
+
+	if (sreq->alg == QCE_HASH_AES_CMAC) {
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		if (sreq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+		else
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* no more check for null key. use flag to check*/
+
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+					QCRYPTO_CTX_USE_HW_KEY) {
+			use_hw_key = true;
+		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY) {
+			use_pipe_key = true;
+		} else {
+			/* setup key */
+			for (i = 0; i < authk_size_in_word; i++)
+				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+					(CRYPTO_AUTH_KEY0_REG +
+							i*sizeof(uint32_t))));
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		auth_cfg = pce_dev->reg.auth_cfg_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+		auth_cfg = pce_dev->reg.auth_cfg_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	/* Set auth_ivn, auth_keyn registers  */
+	for (i = 0; i < 5; i++)
+		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++)
+			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+	}
+
+
+	/* write auth_bytecnt 0/1/2/3, start with 0 */
+	for (i = 0; i < 2; i++)
+		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+					CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+	/* Set/reset  last bit in CFG register  */
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_FIRST);
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+	 /* write seg_cfg */
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* reset encr seg_cfg   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (!use_hw_key) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t ivsize = q_req->ivsize;
+	uint32_t encr_cfg;
+
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	if (qce_crypto_config(pce_dev, q_req->offload_op))
+		return -EINVAL;
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to set the other config registers.
+	 * This ensures the configuration is applied with the correct
+	 * endianness, as set in the CONFIG register.
+	 */
+	mb();
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+		break;
+
+	case CIPHER_ALG_3DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+			else if (key_size  == AES256_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+			else
+				return -EINVAL;
+			break;
+		default:
+		return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* write CNTR0_IV0_REG */
+	if (q_req->mode !=  QCE_MODE_ECB) {
+		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+		for (i = 0; i < enciv_in_word; i++)
+			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	for (i = 0; i < enck_size_in_word; i++)
+		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+	/* write encr seg cfg */
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	for (i = 0; i < authk_size_in_word; i++)
+		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+		for (i = 0; i < 5; i++)
+			QCE_WRITE_REG(_std_init_vector_sha1[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	} else {
+		for (i = 0; i < 8; i++)
+			QCE_WRITE_REG(_std_init_vector_sha256[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write encr seg size    */
+	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write encr start   */
+	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_START_REG);
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+	else
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* write auth seg_cfg */
+	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+
+	/* write seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instruction (setting the GO register)
+	 * has completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+		struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	if (qce_crypto_config(pce_dev, creq->offload_op))
+		return -EINVAL;
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be,
+			(pce_dev->iobase + CRYPTO_CONFIG_REG));
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le,
+			(pce_dev->iobase + CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure previous instructions (setting the CONFIG register)
+	 * were completed before starting to set the other config registers.
+	 * This is to ensure the configurations are done in correct endian-ness
+	 * as set in the CONFIG registers
+	 */
+	mb();
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	if (!use_pipe_key && !use_hw_key) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		}
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			for (i = 0; i < authklen32; i++)
+				QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+		}
+		QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT) {
+			QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		} else {
+			QCE_WRITE_REG((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		}
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		if (creq->op != QCE_REQ_AEAD)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure previous instructions (write to all AUTH registers)
+	 * were completed before accessing a register that is not
+	 * in the same 1K range.
+	 */
+	mb();
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+		}
+		if (!use_hw_key) {
+			QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+			QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+		}
+		if (!use_hw_key) {
+			/* write encr key */
+			for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if (!use_hw_key && !use_pipe_key) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				for (i = 0; i < xtsklen; i++)
+					QCE_WRITE_REG(xtskey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_XTS_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+			/* write xts du size */
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				QCE_WRITE_REG(
+					min((uint32_t)QCE_SECTOR_SIZE,
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				QCE_WRITE_REG(
+					min((uint32_t)(QCE_SECTOR_SIZE * 2),
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			default:
+				QCE_WRITE_REG(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+
+			/* write encr cntr iv */
+			for (i = 0; i <= 3; i++)
+				QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+
+			if (creq->mode == QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				for (i = 0; i <= 3; i++)
+					QCE_WRITE_REG(enciv32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+							(i * sizeof(uint32_t)));
+				/* update cntr_iv[3] by one */
+				QCE_WRITE_REG((enciv32[3] + 1),
+							pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(3 * sizeof(uint32_t)));
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if (!use_hw_key && !use_pipe_key) {
+				for (i = 0; i < enck_size_in_word; i++)
+					QCE_WRITE_REG(enckey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg)  */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	if (use_hw_key)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	/* write encr seg cfg */
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+		QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	} else {
+		QCE_WRITE_REG(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	}
+
+	/* write pattern */
+	if (creq->is_pattern_valid)
+		QCE_WRITE_REG(creq->pattern_info, pce_dev->iobase +
+				CRYPTO_DATA_PATT_PROC_CFG_REG);
+
+	/* write block offset to CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG */
+	QCE_WRITE_REG(((creq->block_offset << 4) |
+		(creq->block_offset ? 1 : 0)),
+		pce_dev->iobase + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG);
+
+	/* write encr seg start */
+	QCE_WRITE_REG((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+	/* write encr counter mask */
+	qce_set_iv_ctr_mask(pce_dev, creq);
+	QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_3,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+	QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_2,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+	QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_1,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+	QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_0,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+
+	/* write seg size  */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* issue go to crypto   */
+	if (!use_hw_key) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure previous instructions (setting the GO register)
+	 * were completed before issuing a DMA transfer request
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+				 struct qce_f9_req *req)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t auth_cfg;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure previous instructions (setting the CONFIG register)
+	 * were completed before starting to set the other config registers.
+	 * This is to ensure the configurations are done in correct endian-ness
+	 * as set in the CONFIG registers
+	 */
+	mb();
+
+	/* write enc_seg_cfg */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr_seg_size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+					CRYPTO_AUTH_IV4_REG));
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT0_REG));
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT1_REG));
+
+	/* write auth seg cfg */
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg size */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start*/
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure previous instructions (setting the GO register)
+	 * were completed before issuing a DMA transfer request
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+		struct qce_f8_req *req, bool key_stream_mode,
+		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+	/* set big endian configuration */
+	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
+		return -EINVAL;
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write auth seg configuration */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+	/* write encr seg cfg */
+	if (key_stream_mode)
+		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg start */
+	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+		CRYPTO_ENCR_SEG_START_REG);
+	/* write encr seg size  */
+	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+		CRYPTO_SEG_SIZE_REG);
+
+	/* write cntr0_iv0 for countC */
+	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+		CRYPTO_CNTR0_IV0_REG);
+	/* write cntr1_iv1 for nPkts, and bearer */
+	if (npkts == 1)
+		npkts = 0;
+	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure previous instructions (setting the GO register)
+	 * were completed before issuing a DMA transfer request
+	 */
+	mb();
+	return 0;
+}
+
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+						[req_info].ce_sps;
+	uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
+
+	if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
+		return rc;
+
+	rc = sps_transfer_one(pce_dev->ce_bam_info.consumer[op].pipe,
+		GET_PHYS_ADDR(
+		pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist),
+		0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+	if (rc) {
+		pr_err("sps_xfr_one() fail rc=%d\n", rc);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static int qce_sps_set_irqs(struct qce_device *pce_dev, bool enable)
+{
+	if (enable)
+		return sps_bam_enable_irqs(pce_dev->ce_bam_info.bam_handle);
+	else
+		return sps_bam_disable_irqs(pce_dev->ce_bam_info.bam_handle);
+}
+
+int qce_set_irqs(void *handle, bool enable)
+{
+	return qce_sps_set_irqs(handle, enable);
+}
+EXPORT_SYMBOL(qce_set_irqs);
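+
+/*
+ * Illustrative sketch (not part of the driver): a client holding a valid
+ * qce handle could use qce_set_irqs() to mask the CE BAM interrupts around
+ * a window where it drains work by polling instead. The error handling
+ * below is an assumption for illustration only.
+ *
+ *	if (!qce_set_irqs(handle, false)) {
+ *		... poll or drain outstanding requests ...
+ *		if (qce_set_irqs(handle, true))
+ *			pr_err("failed to re-enable CE BAM IRQs\n");
+ *	}
+ */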
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete);
+
+static int qce_sps_pipe_reset(struct qce_device *pce_dev, int op)
+{
+	int rc = -1;
+	struct sps_pipe *sps_pipe_info = NULL;
+	struct sps_connect *sps_connect_info = NULL;
+
+	/* Reset both the pipe sets in the pipe group */
+	sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
+			pce_dev->ce_bam_info.dest_pipe_index[op]);
+	sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
+			pce_dev->ce_bam_info.src_pipe_index[op]);
+
+	/* Reconnect to consumer pipe */
+	sps_pipe_info = pce_dev->ce_bam_info.consumer[op].pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.consumer[op].connect;
+	rc = sps_disconnect(sps_pipe_info);
+	if (rc) {
+		pr_err("sps_disconnect() fail pipe=0x%lx, rc = %d\n",
+		(uintptr_t)sps_pipe_info, rc);
+		goto exit;
+	}
+	memset(sps_connect_info->desc.base, 0x00,
+				sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
+		(uintptr_t)sps_pipe_info, rc);
+		goto exit;
+	}
+
+	/* Reconnect to producer pipe */
+	sps_pipe_info = pce_dev->ce_bam_info.producer[op].pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.producer[op].connect;
+	rc = sps_disconnect(sps_pipe_info);
+	if (rc) {
+		pr_err("sps_disconnect() fail pipe=0x%lx, rc = %d\n",
+		(uintptr_t)sps_pipe_info, rc);
+		goto exit;
+	}
+	memset(sps_connect_info->desc.base, 0x00,
+				sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
+		(uintptr_t)sps_pipe_info, rc);
+		goto exit;
+	}
+
+	/* Register producer callback */
+	rc = sps_register_event(sps_pipe_info,
+			&pce_dev->ce_bam_info.producer[op].event);
+	if (rc)
+		pr_err("Producer cb registration failed rc = %d\n",
+							rc);
+exit:
+	return rc;
+}
+
+#define MAX_RESET_TIME_RETRIES 1000
+
+int qce_manage_timeout(void *handle, int req_info)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct skcipher_request *areq;
+	struct ce_request_info *preq_info;
+	qce_comp_func_ptr_t qce_callback;
+	uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
+	struct qce_error error = {0};
+	int retries = 0;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	qce_callback = preq_info->qce_cb;
+	areq = (struct skcipher_request *) preq_info->areq;
+
+	pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info,  op);
+
+	if (qce_sps_pipe_reset(pce_dev, op))
+		pr_err("%s: pipe reset failed\n", __func__);
+
+	qce_get_crypto_status(pce_dev, &error);
+	while (!error.no_error && retries < MAX_RESET_TIME_RETRIES) {
+		usleep_range(3000, 5000);
+		retries++;
+		qce_get_crypto_status(pce_dev, &error);
+		pr_info("%s: waiting for reset to complete\n", __func__);
+	}
+
+	// Write memory barrier
+	wmb();
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info))
+		pr_err("%s: fail unlock other pipes\n", __func__);
+
+	qce_enable_clock_gating(pce_dev);
+
+	if (!atomic_read(&preq_info->in_use)) {
+		pr_err("request information %d already done\n", req_info);
+		return -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	return 0;
+}
+EXPORT_SYMBOL(qce_manage_timeout);
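+
+/*
+ * Illustrative sketch (not part of the driver): a client that detects a
+ * stalled request could recover it with qce_manage_timeout(), which resets
+ * the pipe pair, polls the engine status until it is clean, unlocks the
+ * other pipes and releases the request slot. The completion/timeout
+ * wrapper below is an assumption for illustration only.
+ *
+ *	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(500))) {
+ *		if (qce_manage_timeout(qce_handle, req_info))
+ *			pr_err("qce timeout recovery failed\n");
+ *	}
+ */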
+
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+	uint32_t ccm_fail_status = 0;
+	uint32_t result_dump_status = 0;
+	int32_t result_status = 0;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct aead_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (preq_info->asg)
+		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+
+	/* read status before unlock */
+	if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status =
+				be32_to_cpu(pce_sps_data->result->status);
+			else
+				ccm_fail_status =
+				be32_to_cpu(pce_sps_data->result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+	}
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("aead operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+			pce_sps_data->producer_status)  {
+		pr_err("aead sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (!atomic_read(&preq_info->in_use)) {
+		pr_err("request information %d already done\n", req_info);
+		return -ENXIO;
+	}
+	if (preq_info->mode == QCE_MODE_CCM) {
+		/*
+		 * Not from result dump, instead, use the status we just
+		 * read of device for MAC_FAILED.
+		 */
+		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+			result_status = -EBADMSG;
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, result_status);
+
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+			sizeof(iv));
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, iv, result_status);
+
+	}
+	return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+	uint32_t bytecount32[2];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ahash_request *) preq_info->areq;
+	if (!areq) {
+		pr_err("sha operation error. areq is NULL\n");
+		return -ENXIO;
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	_byte_stream_to_net_words(bytecount32,
+		(unsigned char *)pce_sps_data->result->auth_byte_count,
+					2 * CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, digest, (char *)bytecount32,
+				-ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+
+		pr_err("sha operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status) {
+		pr_err("sha sps operation error. sps status %x\n",
+			pce_sps_data->consumer_status);
+		result_status = -ENXIO;
+	}
+
+	if (!atomic_read(&preq_info->in_use)) {
+		pr_err("request information %d already done\n", req_info);
+		return -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, digest, (char *)bytecount32, result_status);
+	return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+	uint32_t mac_i;
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, DMA_TO_DEVICE);
+	_byte_stream_to_net_words(&mac_i,
+		(char *)(&pce_sps_data->result->auth_iv[0]),
+		CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+				| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("f9 operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("f9 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+	return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct skcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct skcipher_request *) preq_info->areq;
+
+	if (!is_offload_op(preq_info->offload_op)) {
+		if (areq->src != areq->dst)
+			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+					preq_info->dst_nents, DMA_FROM_DEVICE);
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+				preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+	}
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (!is_offload_op(preq_info->offload_op)) {
+		if (result_dump_status & ((1 << CRYPTO_SW_ERR) |
+			(1 << CRYPTO_AXI_ERR) | (1 <<  CRYPTO_HSD_ERR))) {
+			pr_err("ablk_cipher operation error. Status %x\n",
+				result_dump_status);
+			result_status = -ENXIO;
+		}
+	}
+
+	if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_ECB) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+								result_status);
+	} else {
+		if (pce_dev->ce_bam_info.minor_version == 0) {
+			if (preq_info->mode == QCE_MODE_CBC) {
+				if  (preq_info->dir == QCE_DECRYPT)
+					memcpy(iv, (char *)preq_info->dec_iv,
+								sizeof(iv));
+				else
+					memcpy(iv, (unsigned char *)
+						(sg_virt(areq->src) +
+						areq->src->length - 16),
+						sizeof(iv));
+			}
+			if ((preq_info->mode == QCE_MODE_CTR) ||
+				(preq_info->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv3 = 0;
+				unsigned long long cntr_iv64 = 0;
+				unsigned char *b = (unsigned char *)(&cntr_iv3);
+
+				memcpy(iv, areq->iv, sizeof(iv));
+				if (preq_info->mode != QCE_MODE_XTS)
+					num_blk = areq->cryptlen/16;
+				else
+					num_blk = 1;
+				cntr_iv3 =  ((*(iv + 12) << 24) & 0xff000000) |
+					(((*(iv + 13)) << 16) & 0xff0000) |
+					(((*(iv + 14)) << 8) & 0xff00) |
+					(*(iv + 15) & 0xff);
+				cntr_iv64 =
+					(((unsigned long long)cntr_iv3 &
+					0xFFFFFFFFULL) +
+					(unsigned long long)num_blk) %
+					(unsigned long long)(0x100000000ULL);
+
+				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+				*(iv + 15) = (char)(*b);
+				*(iv + 14) = (char)(*(b + 1));
+				*(iv + 13) = (char)(*(b + 2));
+				*(iv + 12) = (char)(*(b + 3));
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_sps_data->result->encr_cntr_iv),
+				sizeof(iv));
+		}
+
+		if (!atomic_read(&preq_info->in_use)) {
+			pr_err("request information %d already done\n", req_info);
+			return -ENXIO;
+		}
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, iv, result_status);
+	}
+	return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	uint32_t result_dump_status2;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				preq_info->ota_size, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, (preq_info->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+	if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR)))) {
+		pr_err(
+			"f8 oper error. Dump Sta %x Sta2 %x req %d\n",
+			result_dump_status, result_dump_status2, req_info);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("f8 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	pce_sps_data->result->status = 0;
+	pce_sps_data->result->status2 = 0;
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, NULL, NULL, result_status);
+	return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+							.ce_sps;
+	pce_sps_data->in_transfer.iovec_count = 0;
+	pce_sps_data->out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+	struct sps_iovec *iovec;
+
+	if (sps_bam_pipe->iovec_count == 0)
+		return;
+	iovec  = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= flag;
+}
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	uint32_t data_cnt;
+
+	while (len > 0) {
+		if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)\n",
+				sps_bam_pipe->iovec_count,
+				(uint32_t)QCE_MAX_NUM_DSCR);
+			return -ENOMEM;
+		}
+		if (len > SPS_MAX_PKT_SIZE)
+			data_cnt = SPS_MAX_PKT_SIZE;
+		else
+			data_cnt = len;
+		iovec->size = data_cnt;
+		iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+		iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+		sps_bam_pipe->iovec_count++;
+		iovec++;
+		paddr += data_cnt;
+		len -= data_cnt;
+	}
+	return 0;
+}
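+
+/*
+ * Worked example (illustration only, assuming SPS_MAX_PKT_SIZE is 32 KB):
+ * a 70 KB contiguous buffer passed to _qce_sps_add_data() would be split
+ * into three iovecs of 32 KB, 32 KB and 6 KB, each carrying the lower
+ * address bits in iovec->addr and the upper address bits in iovec->flags.
+ */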
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	while (nbytes > 0 && sg_src) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src = sg_next(sg_src);
+	}
+	return 0;
+}
+
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+	unsigned int res_within_sg;
+
+	if (!sg_src)
+		return -ENOENT;
+	res_within_sg = sg_dma_len(sg_src);
+
+	while (off > 0) {
+		if (!sg_src) {
+			pr_err("broken sg list off %d nbytes %d\n",
+				off, nbytes);
+			return -ENOENT;
+		}
+		len = sg_dma_len(sg_src);
+		if (off < len) {
+			res_within_sg = len - off;
+			break;
+		}
+		off -= len;
+		sg_src = sg_next(sg_src);
+		if (sg_src)
+			res_within_sg = sg_dma_len(sg_src);
+	}
+	while (nbytes > 0 && sg_src) {
+		len = min(nbytes, res_within_sg);
+		nbytes -= len;
+		addr = sg_dma_address(sg_src) + off;
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		if (nbytes) {
+			sg_src = sg_next(sg_src);
+			if (!sg_src) {
+				pr_err("more data bytes %d\n", nbytes);
+				return -ENOMEM;
+			}
+			res_within_sg = sg_dma_len(sg_src);
+			off = 0;
+		}
+	}
+	return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	dma_addr_t  paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	iovec->size = cmdptr->size;
+	iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+	iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+	sps_bam_pipe->iovec_count++;
+	if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)\n",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data;
+	uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	pce_sps_data->out_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
+	pce_sps_data->in_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
+	_qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+	if (pce_sps_data->in_transfer.iovec_count) {
+		rc = sps_transfer(pce_dev->ce_bam_info.consumer[op].pipe,
+					  &pce_sps_data->in_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (cons pipe=0x%lx) rc = %d\n",
+			(uintptr_t)pce_dev->ce_bam_info.consumer[op].pipe,
+				rc);
+			goto ret;
+		}
+	}
+	rc = sps_transfer(pce_dev->ce_bam_info.producer[op].pipe,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+			(uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc);
+ret:
+	return rc;
+}
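+
+/*
+ * Illustrative sketch (not part of the driver): the request submission
+ * paths in this file roughly follow the sequence below when queueing a
+ * single request; items in angle brackets are placeholders.
+ *
+ *	_qce_sps_iovec_count_init(pce_dev, req_info);
+ *	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, <cmdlist>,
+ *			&pce_sps_data->in_transfer);
+ *	_qce_sps_add_sg_data(pce_dev, <src sg>, <nbytes>,
+ *			&pce_sps_data->in_transfer);
+ *	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+ *			CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+ *	_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+ *	rc = _qce_sps_transfer(pce_dev, req_info);
+ */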
+
+/**
+ * Allocate and Connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling
+ * the appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @index - Points to crypto use case
+ * @is_producer - 1 means Producer endpoint
+ *		 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				int index,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d\n",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For CE producer transfer, source should be
+		 * CE peripheral whereas destination should
+		 * be system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	} else {
+		/* For CE consumer transfer, source should be
+		 * system memory whereas destination should
+		 * be CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index =
+				pce_dev->ce_bam_info.src_pipe_index[index];
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index =
+				pce_dev->ce_bam_info.dest_pipe_index[index];
+	/* Set pipe group */
+	sps_connect_info->lock_group =
+			pce_dev->ce_bam_info.pipe_pair_index[index];
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Max. no of scatter/gather buffers that can
+	 * be passed by block layer = 32 (NR_SG).
+	 * Each BAM descriptor needs 64 bits (8 bytes).
+	 * One BAM descriptor is required per buffer transfer.
+	 * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
+	 * But due to a HW limitation we need to allocate at least one extra
+	 * descriptor's worth of memory (256 bytes + 8 bytes). In order to be
+	 * a power of 2, we allocate 512 bytes of memory.
+	 */
+	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+					sizeof(struct sps_iovec);
+	if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+		sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL | __GFP_ZERO);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+	if (is_producer) {
+		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+		sps_event->callback = _sps_producer_callback;
+		rc = sps_register_event(ep->pipe, sps_event);
+		if (rc) {
+			pr_err("Producer callback registration failed rc=%d\n",
+									rc);
+			goto sps_connect_err;
+		}
+	} else {
+		sps_event->options = SPS_O_EOT;
+		sps_event->callback = NULL;
+	}
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and Deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+	struct bam_registration_info *pbam;
+
+	mutex_lock(&bam_register_lock);
+	pbam = pce_dev->pbam;
+	if (pbam == NULL)
+		goto ret;
+
+	pbam->cnt--;
+	if (pbam->cnt > 0)
+		goto ret;
+
+	if (pce_dev->ce_bam_info.bam_handle) {
+		sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+		pr_debug("deregister bam handle 0x%lx\n",
+					pce_dev->ce_bam_info.bam_handle);
+		pce_dev->ce_bam_info.bam_handle = 0;
+	}
+	iounmap(pbam->bam_iobase);
+	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+	list_del(&pbam->qlist);
+	kfree(pbam);
+
+ret:
+	pce_dev->pbam = NULL;
+	mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	struct bam_registration_info *pbam = NULL;
+	struct bam_registration_info *p;
+	uint32_t bam_cfg = 0;
+
+
+	mutex_lock(&bam_register_lock);
+
+	list_for_each_entry(p, &qce50_bam_list, qlist) {
+		if (p->bam_mem == pce_dev->bam_mem) {
+			pbam = p;  /* found */
+			break;
+		}
+	}
+
+	if (pbam) {
+		pr_debug("found bam 0x%x\n", pbam->bam_mem);
+		pbam->cnt++;
+		pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+		pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+		pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+		pce_dev->pbam = pbam;
+		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+		goto ret;
+	}
+
+	pbam = kzalloc(sizeof(struct  bam_registration_info), GFP_KERNEL);
+	if (!pbam) {
+		rc = -ENOMEM;
+		goto ret;
+	}
+	pbam->cnt = 1;
+	pbam->bam_mem = pce_dev->bam_mem;
+	pbam->bam_iobase = ioremap(pce_dev->bam_mem,
+					pce_dev->bam_mem_size);
+	if (!pbam->bam_iobase) {
+		kfree(pbam);
+		rc = -ENOMEM;
+		pr_err("Can not map BAM io memory\n");
+		goto ret;
+	}
+	pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+	pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+	pbam->handle = 0;
+	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+	bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+					CRYPTO_BAM_CNFG_BITS_REG);
+	pbam->support_cmd_dscr =  (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+					true : false;
+	if (!pbam->support_cmd_dscr) {
+		pr_info("qce50 doesn't support command descriptor. bam_cfg %x\n",
+							bam_cfg);
+		pce_dev->no_get_around = false;
+	}
+	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+	bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+	bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfer. It's ignored for BAM-to-System mode transfer.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when
+	 * data transfer size >  64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+	/*
+	 * Set flag to indicate BAM global device control is managed
+	 * remotely.
+	 */
+	if (!pce_dev->support_cmd_dscr || pce_dev->is_shared)
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+	else
+		bam.manage = SPS_BAM_MGR_LOCAL;
+
+	bam.ee = pce_dev->ce_bam_info.bam_ee;
+	bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
+	bam.options |= SPS_BAM_CACHED_WP;
+	pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+	pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
+
+	/* Register CE Peripheral BAM device to SPS driver */
+	rc = sps_register_bam_device(&bam, &pbam->handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed! err=%d\n", rc);
+		rc = -EIO;
+		iounmap(pbam->bam_iobase);
+		kfree(pbam);
+		goto ret;
+	}
+
+	pce_dev->pbam = pbam;
+	list_add_tail(&pbam->qlist, &qce50_bam_list);
+	pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+
+ret:
+	mutex_unlock(&bam_register_lock);
+
+	return rc;
+}
+/**
+ * Initialize SPS HW connected with CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes 2 SPS endpoints
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0, i = 0;
+
+	rc = qce_sps_get_bam(pce_dev);
+	if (rc)
+		return rc;
+	pr_debug("BAM device registered. bam_handle=0x%lx\n",
+		pce_dev->ce_bam_info.bam_handle);
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
+			continue;
+		else if ((i > 0) && !(pce_dev->offload_pipes_support))
+			break;
+		if (!pce_dev->ce_bam_info.pipe_pair_index[i])
+			continue;
+		rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.producer[i], i, true);
+		if (rc)
+			goto sps_connect_producer_err;
+		rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.consumer[i], i, false);
+		if (rc)
+			goto sps_connect_consumer_err;
+	}
+
+	pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_bam_info.bam_mem,
+		(unsigned int)pce_dev->ce_bam_info.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer[i]);
+sps_connect_producer_err:
+	qce_sps_release_bam(pce_dev);
+	return rc;
+}
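+
+/*
+ * Illustrative probe/remove ordering (sketch only): qce_sps_init() is
+ * expected to run once, after the BAM memory, IRQ and pipe pair indices
+ * are known, and qce_sps_exit() undoes it on driver remove.
+ *
+ *	rc = qce_sps_init(pce_dev);	// register BAM, connect pipe pairs
+ *	if (rc)
+ *		goto err;
+ *	...
+ *	qce_sps_exit(pce_dev);		// disconnect pipes, release BAM
+ */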
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+	int i;
+	int request_index = pce_dev->ce_request_index;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		request_index++;
+		if (request_index >= MAX_QCE_BAM_REQ)
+			request_index = 0;
+		if (!atomic_xchg(
+			&pce_dev->ce_request_info[request_index].in_use,
+								true)) {
+			pce_dev->ce_request_index = request_index;
+			return request_index;
+		}
+	}
+	pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+			pce_dev->dev_no, atomic_read(
+					&pce_dev->no_of_queued_req));
+	return -EBUSY;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete)
+{
+	pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+	if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+						false)) {
+		if (req_info < MAX_QCE_BAM_REQ && is_complete)
+			atomic_dec(&pce_dev->no_of_queued_req);
+	} else
+		pr_warn("request info %d free already\n", req_info);
+}
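+
+/*
+ * Illustrative pairing (sketch only): every submission path first claims a
+ * request slot with qce_alloc_req_info() and must release it exactly once
+ * with qce_free_req_info(), either from the completion path or from an
+ * error path taken after allocation.
+ *
+ *	req_info = qce_alloc_req_info(pce_dev);
+ *	if (req_info < 0)
+ *		return req_info;	// all slots busy, -EBUSY
+ *	...
+ *	if (setup_failed)
+ *		qce_free_req_info(pce_dev, req_info, false);
+ */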
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+	phys_addr_t addr =
+		DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+				  notify->data.transfer.iovec.addr);
+	pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
+			notify->event_id, &addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags,
+			notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+	struct ce_request_info *preq_info;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	switch (preq_info->xfer_type) {
+	case QCE_XFER_CIPHERING:
+		_ablk_cipher_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_HASHING:
+		_sha_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_AEAD:
+		_aead_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F8:
+		_f8_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F9:
+		_f9_complete(pce_dev, req_info);
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, true);
+		break;
+	}
+}
+
+static void qce_multireq_timeout(struct timer_list *data)
+{
+	struct qce_device *pce_dev = from_timer(pce_dev, data, timer);
+	int ret = 0;
+	int last_seq;
+	unsigned long flags;
+
+	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+	if (last_seq == 0 ||
+		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+		atomic_set(&pce_dev->last_intr_seq, last_seq);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+	/* last bunch mode command time out */
+
+	/*
+	 * From here until the dummy request finishes the sps request and sets
+	 * the owner back to none, we disable interrupts, so this section
+	 * cannot be preempted or interrupted. If a BAM interrupt happened in
+	 * between and the completion callback were called from BAM, a new
+	 * request might be issued by the client driver and a deadlock could
+	 * occur.
+	 */
+	local_irq_save(flags);
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
+							!= QCE_OWNER_NONE) {
+		local_irq_restore(flags);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+
+	ret = qce_dummy_req(pce_dev);
+	if (ret)
+		pr_warn("pcedev %d: Failed to insert dummy req\n",
+				pce_dev->dev_no);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	local_irq_restore(flags);
+
+	del_timer(&(pce_dev->timer));
+	pce_dev->qce_stats.no_of_timeouts++;
+	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (!_qce50_disp_stats)
+		return;
+	pr_info("Engine %d timeout occurred %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_timeouts);
+	pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_dummy_reqs);
+	if (pce_dev->mode)
+		pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+	else
+		pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+	pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
+			atomic_read(&pce_dev->no_of_queued_req));
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	pce_dev->qce_stats.no_of_timeouts = 0;
+	pce_dev->qce_stats.no_of_dummy_reqs = 0;
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
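+
+/*
+ * Illustrative sketch (not part of the driver): a client could expose these
+ * counters through debugfs; the hook below is an assumption for
+ * illustration only.
+ *
+ *	qce_get_driver_stats(qce_handle);	// log timeouts, dummy reqs, mode
+ *	qce_clear_driver_stats(qce_handle);	// zero the counters
+ */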
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+	int rc = 0;
+	unsigned int req_info;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+	uint16_t op;
+
+	print_notify_debug(notify);
+
+	req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+	if ((req_info & 0xffff0000)  != CRYPTO_REQ_USER_PAT) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	if (!atomic_read(&preq_info->in_use)) {
+		pr_err("request information %d already done\n", req_info);
+		return;
+	}
+	op = pce_dev->ce_request_info[req_info].offload_op;
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+		preq_info->xfer_type == QCE_XFER_AEAD) &&
+			pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		if (!is_offload_op(op) && (op < QCE_OFFLOAD_OPER_LAST)) {
+			pce_sps_data->out_transfer.iovec_count = 0;
+			_qce_sps_add_data(GET_PHYS_ADDR(
+					pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+			_qce_set_flag(&pce_sps_data->out_transfer,
+				SPS_IOVEC_FLAG_INT);
+			rc = sps_transfer(
+					pce_dev->ce_bam_info.producer[op].pipe,
+					&pce_sps_data->out_transfer);
+			if (rc) {
+				pr_err("sps_xfr fail (prod pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.producer[op].pipe,
+				rc);
+			}
+		}
+		return;
+	}
+
+	_qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function deinitializes SPS endpoints and then
+ * deregisters BAM resources from the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	int i = 0;
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
+			continue;
+		else if ((i > 0) && !(pce_dev->offload_pipes_support))
+			break;
+		if (!pce_dev->ce_bam_info.pipe_pair_index[i])
+			continue;
+		qce_sps_exit_ep_conn(pce_dev,
+				&pce_dev->ce_bam_info.consumer[i]);
+		qce_sps_exit_ep_conn(pce_dev,
+				&pce_dev->ce_bam_info.producer[i]);
+	}
+	qce_sps_release_bam(pce_dev);
+}
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->command = 0;
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	(*cmd_ptr)->reserved = 0;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_ECB:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_XTS:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+	break;
+	default:
+		pr_err("Unknown mode of operation %d received, exiting now\n",
+			mode);
+		return -EINVAL;
+	break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS2_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS3_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS4_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS5_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS6_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+			pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+			pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+			pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+			pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_DATA_PATT_PROC_CFG_REG, 0,
+						&pcl_info->pattern_info);
+	qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG, 0,
+				&pcl_info->block_offset);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_XTS_KEY0_REG +
+						i * sizeof(uint32_t)), 0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+					&pcl_info->encr_xts_du_size);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy to align size to burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_des_ecb;
+			iv_reg = 0;
+			key_reg = 2;
+		}
+	break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+			iv_reg = 0;
+			key_reg = 6;
+		}
+	break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->cipher_null);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+			pdev->ce_bam_info.ce_burst_size, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+			pdev->reg.encr_cfg_aes_ecb_128, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+			 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = pdev->reg.auth_cfg_sha1;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = pdev->reg.auth_cfg_sha256;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+		key_reg = 16;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+	break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+		key_reg = 16;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_128;
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_256;
+			key_reg = 8;
+		}
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				int cri_index,
+				unsigned char **pvaddr,
+				uint32_t alg,
+				uint32_t mode,
+				uint32_t key_size,
+				bool     sha1)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmd;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	uint32_t key_reg;
+	uint32_t iv_reg;
+	uint32_t i;
+	uint32_t  enciv_in_word;
+	uint32_t encr_cfg;
+
+	cmd = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	switch (alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmd->aead_hmac_sha1_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+				&(cmd->aead_hmac_sha1_cbc_des);
+			} else {
+				cmd->aead_hmac_sha256_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+				&(cmd->aead_hmac_sha256_cbc_des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_3DES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmd->aead_hmac_sha1_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+				&(cmd->aead_hmac_sha1_cbc_3des);
+			} else {
+				cmd->aead_hmac_sha256_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+				&(cmd->aead_hmac_sha256_cbc_3des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+		if (key_size ==  AES128_KEY_SIZE) {
+			if (sha1) {
+				cmd->aead_hmac_sha1_cbc_aes_128.cmdlist	=
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+					&(cmd->aead_hmac_sha1_cbc_aes_128);
+			} else {
+				cmd->aead_hmac_sha256_cbc_aes_128.cmdlist
+					= (uintptr_t)ce_vaddr;
+				pcl_info =
+					&(cmd->aead_hmac_sha256_cbc_aes_128);
+			}
+			encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+		} else if (key_size ==  AES256_KEY_SIZE) {
+			if (sha1) {
+				cmd->aead_hmac_sha1_cbc_aes_256.cmdlist	=
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+					&(cmd->aead_hmac_sha1_cbc_aes_256);
+			} else {
+				cmd->aead_hmac_sha256_cbc_aes_256.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info =
+				&(cmd->aead_hmac_sha256_cbc_aes_256);
+			}
+			encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+		} else {
+			return -EINVAL;
+		}
+		break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	key_reg = key_size/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+			&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	if (mode != QCE_MODE_ECB) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+			&pcl_info->encr_cntr_iv);
+		for (i = 1; i < enciv_in_word; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+
+	if (sha1)
+		iv_reg = 5;
+	else
+		iv_reg = 8;
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+				&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+				0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+			 &pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+			&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+			&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			&pcl_info->encr_seg_start);
+
+	if (sha1)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+			pdev->reg.auth_cfg_aead_sha1_hmac,
+			&pcl_info->auth_seg_cfg);
+	else
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+			pdev->reg.auth_cfg_aead_sha256_hmac,
+			&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+			&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+			&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	if (key_128) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+		key_reg = 4;
+	} else {
+
+		cmdlistptr->aead_aes_256_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+
+		key_reg = 8;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+			pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+			pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+			pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+			pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 4;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to f8 cipher algorithm defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_kasumi);
+		encr_cfg = pdev->reg.encr_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_snow3g);
+		encr_cfg = pdev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						 &pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_kasumi);
+		auth_cfg = pdev->reg.auth_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_snow3g);
+		auth_cfg = pdev->reg.auth_cfg_snow3g;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	iv_reg = 5;
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+					pdev->ce_bam_info.ce_burst_size);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								false);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								true);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								false);
+
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+	_setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	return 0;
+}
+
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+	int i;
+	unsigned char *iovec_vaddr;
+	int iovec_memsize;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+					pce_dev->ce_bam_info.ce_burst_size);
+	iovec_vaddr = pce_dev->iovec_vmem;
+	iovec_memsize = pce_dev->iovec_memsize;
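+	/*
+	 * Carve the pre-allocated iovec and coherent memory into per-request
+	 * regions: in/out SPS iovec tables, the command lists (when command
+	 * descriptors are supported), two result-dump areas, and an ignore
+	 * buffer used for pass-through data.
+	 */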
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+		/* Allow for 256 descriptor (cmd and data) entries per pipe */
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+			virt_to_phys(
+			pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+			virt_to_phys(
+			pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		if (pce_dev->support_cmd_dscr)
+			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+				pce_dev->ce_bam_info.ce_burst_size);
+		pce_dev->ce_request_info[i].ce_sps.result_dump =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result_null =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+				(uintptr_t)vaddr;
+		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+	}
+	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+							iovec_memsize < 0)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+				 pce_dev->memsize, (uintptr_t)vaddr -
+				(uintptr_t)pce_dev->coh_vmem);
+	return 0;
+}
+
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+	uint32_t pipe_pair =
+		pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE];
+
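+	/*
+	 * The big-endian CRYPTO_CONFIG value is derived from the kernel pipe
+	 * pair; the little-endian variant only adds the little-endian bit and
+	 * is written back near the end of every command list.
+	 */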
+	pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair);
+
+	pce_dev->reg.crypto_cfg_le =
+		(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
+
+	/* Initialize encr_cfg register for AES alg */
+	pce_dev->reg.encr_cfg_aes_cbc_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_cbc_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ccm_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	pce_dev->reg.encr_cfg_aes_ccm_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	/* Initialize encr_cfg register for DES alg */
+	pce_dev->reg.encr_cfg_des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	/* Initialize encr_cfg register for kasumi/snow3g  alg */
+	pce_dev->reg.encr_cfg_kasumi =
+		(CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+	pce_dev->reg.encr_cfg_snow3g =
+		(CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+	/* Initialize auth_cfg register for CMAC alg */
+	pce_dev->reg.auth_cfg_cmac_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+	pce_dev->reg.auth_cfg_cmac_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+	/* Initialize auth_cfg register for HMAC alg */
+	pce_dev->reg.auth_cfg_hmac_sha1 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_hmac_sha256 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for SHA1/256 alg */
+	pce_dev->reg.auth_cfg_sha1 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_sha256 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for AEAD alg */
+	pce_dev->reg.auth_cfg_aead_sha1_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aead_sha256_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aes_ccm_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	pce_dev->reg.auth_cfg_aes_ccm_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	/* Initialize auth_cfg register for kasumi/snow3g */
+	pce_dev->reg.auth_cfg_kasumi =
+			(CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	pce_dev->reg.auth_cfg_snow3g =
+			(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+
+	/* Initialize IV counter mask values */
+	pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
+	pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
+	pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
+	pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
+
+	return 0;
+}
+
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct qce_cmdlist_info *cmdlistinfo;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
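+	/*
+	 * Get-around for engines that cannot report the CCM MAC-compare
+	 * status directly: on decrypt, chase the request with a null-cipher
+	 * command and a burst of ignore data so the MAC status can be
+	 * collected via a separate result dump on the output side.
+	 */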
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+		_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+				&pce_sps_data->in_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+	}
+}
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+
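+	/* Output-side counterpart of the CCM MAC-status get-around above. */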
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+	}
+}
+
+/* QCE_DUMMY_REQ */
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	if (!cookie)
+		pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+	int ret = 0;
+
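+	/*
+	 * Claim the request slot reserved for driver-generated dummy
+	 * requests; bail out if a dummy request is already in flight.
+	 */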
+	if (atomic_xchg(
+		&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
+		return -EBUSY;
+	ret = qce_process_sha_req(pce_dev, NULL);
+	pce_dev->qce_stats.no_of_dummy_reqs++;
+	return ret;
+}
+
+static int select_mode(struct qce_device *pce_dev,
+		struct ce_request_info *preq_info)
+{
+	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+	unsigned int no_of_queued_req;
+	unsigned int cadence;
+
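+	/*
+	 * Decide how completion interrupts are requested for this transfer:
+	 * legacy engines interrupt on every request, while newer engines
+	 * switch between interrupt mode and bunch mode and thin out
+	 * interrupts based on a request-length driven cadence.
+	 */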
+	if (!pce_dev->no_get_around) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		return 0;
+	}
+
+	/*
+	 * claim ownership of device
+	 */
+again:
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
+							!= QCE_OWNER_NONE) {
+		ndelay(40);
+		goto again;
+	}
+	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
+	if (pce_dev->mode == IN_INTERRUPT_MODE) {
+		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+			pce_dev->mode = IN_BUNCH_MODE;
+			pr_debug("pcedev %d mode switch to BUNCH\n",
+					pce_dev->dev_no);
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 1);
+			atomic_set(&pce_dev->last_intr_seq, 1);
+			mod_timer(&(pce_dev->timer),
+					(jiffies + DELAY_IN_JIFFIES));
+		} else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+		}
+	} else {
+		pce_dev->intr_cadence++;
+		cadence = (preq_info->req_len >> 7) + 1;
+		if (cadence > SET_INTR_AT_REQ)
+			cadence = SET_INTR_AT_REQ;
+		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
+					== cadence) && pce_dev->cadence_flag))
+			atomic_inc(&pce_dev->bunch_cmd_seq);
+		else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 0);
+			atomic_set(&pce_dev->last_intr_seq, 0);
+			pce_dev->cadence_flag = !pce_dev->cadence_flag;
+		}
+	}
+
+	return 0;
+}
+
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	q_req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+	totallen_in = areq->cryptlen + q_req->assoclen;
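+	/*
+	 * On encrypt the MAC is appended to the ciphertext and the output is
+	 * padded up to the CE burst size; on decrypt only the plaintext
+	 * length is produced, so the MAC-sized tail is routed to the ignore
+	 * buffer instead.
+	 */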
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		out_len = areq->cryptlen + authsize;
+		hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
+
+	/*
+	 * For crypto 5.0, which has a burst size alignment requirement
+	 * for data descriptors, the agent above (qcrypto) prepares the
+	 * src scatter list with memory starting with the associated data,
+	 * followed by the data stream to be ciphered.
+	 * The destination scatter list points to the same data area as
+	 * the source.
+	 */
+	if (pce_dev->ce_bam_info.minor_version == 0)
+		preq_info->src_nents = count_sg(areq->src, totallen_in);
+	else
+		preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+							areq->assoclen);
+
+	if (q_req->assoclen) {
+		preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
+
+		/* formatted associated data input */
+		qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+		preq_info->asg = q_req->asg;
+	} else {
+		preq_info->assoc_nents = 0;
+		preq_info->asg = NULL;
+	}
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		/*
+		 * The destination scatter list points to the same data
+		 * area as src.
+		 * Note that the associated data will be passed through
+		 * at the beginning of the destination area.
+		 */
+		preq_info->dst_nents = count_sg(areq->dst,
+						out_len + areq->assoclen);
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+								 q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						q_req->alg, q_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+					q_req->assoclen, cmdlistinfo);
+	} else {
+		/* set up crypto device */
+		rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+					q_req->assoclen);
+	}
+
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = q_req->mode;
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen_in;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr && cmdlistinfo) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		/* minor version 0 is not handled on this path; fail it */
+		rc = -EINVAL;
+		goto bad;
+	} else {
+		if (q_req->assoclen) {
+			rc = _qce_sps_add_sg_data(pce_dev, q_req->asg,
+				q_req->assoclen, &pce_sps_data->in_transfer);
+			if (rc)
+				goto bad;
+		}
+		rc = _qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+					areq->assoclen,
+					&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		_qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+		if (pce_dev->no_get_around) {
+			rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+			if (rc)
+				goto bad;
+		}
+
+		/* Pass through to ignore associated data */
+		rc = _qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				q_req->assoclen,
+				&pce_sps_data->out_transfer);
+		if (rc)
+			goto bad;
+		rc = _qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+					areq->assoclen,
+					&pce_sps_data->out_transfer);
+		if (rc)
+			goto bad;
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		rc = _qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				hw_pad_out, &pce_sps_data->out_transfer);
+		if (rc)
+			goto bad;
+		if (pce_dev->no_get_around ||
+				totallen_in <= SPS_MAX_PKT_SIZE) {
+			rc = _qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+			if (rc)
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+
+		_qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->assoc_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
+				preq_info->assoc_nents, DMA_TO_DEVICE);
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+
+static int _qce_suspend(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	int i = 0;
+
+	if (handle == NULL)
+		return -ENODEV;
+
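+	/*
+	 * Disconnect every consumer/producer pipe pair that was brought up;
+	 * _qce_resume() reconnects them and re-registers the producer event
+	 * callbacks.
+	 */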
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
+			continue;
+		else if ((i > 0) && !(pce_dev->offload_pipes_support))
+			break;
+		if (!pce_dev->ce_bam_info.pipe_pair_index[i])
+			continue;
+		sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
+		sps_disconnect(sps_pipe_info);
+
+		sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
+		sps_disconnect(sps_pipe_info);
+	}
+
+	return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info;
+	int rc, i;
+
+	rc = -ENODEV;
+	if (handle == NULL)
+		return rc;
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
+			continue;
+		else if ((i > 0) && !(pce_dev->offload_pipes_support))
+			break;
+		if (!pce_dev->ce_bam_info.pipe_pair_index[i])
+			continue;
+		sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
+		sps_connect_info = &pce_dev->ce_bam_info.consumer[i].connect;
+		memset(sps_connect_info->desc.base, 0x00,
+					sps_connect_info->desc.size);
+		rc = sps_connect(sps_pipe_info, sps_connect_info);
+		if (rc) {
+			pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+			return rc;
+		}
+		sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
+		sps_connect_info = &pce_dev->ce_bam_info.producer[i].connect;
+		memset(sps_connect_info->desc.base, 0x00,
+					sps_connect_info->desc.size);
+		rc = sps_connect(sps_pipe_info, sps_connect_info);
+		if (rc)
+			pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+
+		rc = sps_register_event(sps_pipe_info,
+				&pce_dev->ce_bam_info.producer[i].event);
+		if (rc)
+			pr_err("Producer cb registration failed rc = %d\n",
+								rc);
+	}
+	qce_enable_clock_gating(pce_dev);
+
+	return rc;
+}
+
+struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
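+/*
+ * AEAD request entry point. CCM requests are routed to the dedicated CCM
+ * path; for the other modes the command list (or direct register setup) is
+ * programmed here and the src/dst scatterlists are queued on the BAM pipes.
+ */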
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct aead_request *areq;
+	uint32_t authsize;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t totallen;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	if (q_req->mode == QCE_MODE_CCM)
+		return _qce_aead_ccm_req(handle, q_req);
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	q_req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	areq = (struct aead_request *) q_req->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+	q_req->ivsize = ivsize;
+	authsize = q_req->authsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
+	if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+		pr_err("Integer overflow on total aead req length.\n");
+		return -EINVAL;
+	}
+
+	totallen = q_req->cryptlen + areq->assoclen;
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+							req_info, q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+				q_req->alg, q_req->mode, q_req->encklen,
+					q_req->authsize);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_aead(pce_dev, q_req, totallen,
+					areq->assoclen, cmdlistinfo);
+		if (rc < 0) {
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * For crypto 5.0 that has burst size alignment requirement
+	 * for data descriptor,
+	 * the agent above (qcrypto) prepares the src scatter list with
+	 * memory starting with associated data, followed by
+	 * iv, and data stream to be ciphered.
+	 */
+	preq_info->src_nents = count_sg(areq->src, totallen);
+
+
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output for encryption */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, totallen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+	preq_info->asg = NULL;
+	preq_info->offload_op = QCE_OFFLOAD_NONE;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	} else {
+		rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+					areq->assoclen);
+		if (rc)
+			goto bad;
+	}
+
+	preq_info->mode = q_req->mode;
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+				&pce_sps_data->out_transfer);
+		if (rc)
+			goto bad;
+		if (totallen > SPS_MAX_PKT_SIZE) {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		} else {
+			rc = _qce_sps_add_data(GET_PHYS_ADDR(
+					pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_sps_data->out_transfer);
+			if (rc)
+				goto bad;
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		}
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (pce_dev->no_get_around) {
+			rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+			if (rc)
+				goto bad;
+		}
+
+		rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+					&pce_sps_data->out_transfer);
+		if (rc)
+			goto bad;
+
+		if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+			rc = _qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+			if (rc)
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->src_nents)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	if (areq->src != areq->dst)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
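+/*
+ * Symmetric cipher (skcipher) request entry point. Offload requests arrive
+ * with buffers already mapped, so DMA mapping is only performed for
+ * non-offload operations.
+ */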
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct skcipher_request *areq = (struct skcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	c_req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = 0;
+	preq_info->dst_nents = 0;
+
+	/* cipher input */
+	preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
+
+	if (!is_offload_op(c_req->offload_op))
+		qce_dma_map_sg(pce_dev->pdev, areq->src,
+			preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen);
+		if (!is_offload_op(c_req->offload_op))
+			qce_dma_map_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+	preq_info->dir = c_req->dir;
+	if  ((pce_dev->ce_bam_info.minor_version == 0) &&
+			(preq_info->dir == QCE_DECRYPT) &&
+			(c_req->mode == QCE_MODE_CBC)) {
+		memcpy(preq_info->dec_iv, (unsigned char *)
+			sg_virt(areq->src) + areq->src->length - 16,
+			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+	}
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+							req_info, c_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						c_req->alg, c_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_cipher(pce_dev, c_req, areq->cryptlen, 0,
+							cmdlistinfo);
+	} else {
+		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->cryptlen, 0);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = c_req->mode;
+	preq_info->offload_op = c_req->offload_op;
+
+	/* setup for client callback, and issue command to BAM */
+	preq_info->areq = areq;
+	preq_info->qce_cb = c_req->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_CIPHERING;
+	preq_info->req_len = areq->cryptlen;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr && cmdlistinfo) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+	rc = _qce_sps_add_data(areq->src->dma_address, areq->cryptlen,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	if (pce_dev->no_get_around) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+
+	rc = _qce_sps_add_data(areq->dst->dma_address, areq->cryptlen,
+					&pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+	if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		if (!is_offload_op(c_req->offload_op)) {
+			rc = _qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+				CRYPTO_RESULT_DUMP_SIZE,
+				&pce_sps_data->out_transfer);
+			if (rc)
+				goto bad;
+		}
+	} else {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+	}
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	if (!is_offload_op(c_req->offload_op)) {
+		if (areq->src != areq->dst)
+			if (preq_info->dst_nents)
+				qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+
+		if (preq_info->src_nents)
+			qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+				preq_info->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
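+/*
+ * Hash/HMAC request entry point. A NULL sreq selects the driver's internal
+ * dummy request slot, which is issued without switching the engine mode.
+ */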
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+	bool is_dummy = false;
+
+	if (!sreq) {
+		sreq = &(pce_dev->dummyreq.sreq);
+		req_info = DUMMY_REQ_INDEX;
+		is_dummy = true;
+	} else {
+		req_info = qce_alloc_req_info(pce_dev);
+		if (req_info < 0)
+			return -EBUSY;
+	}
+
+	sreq->current_req_info = req_info;
+	areq = (struct ahash_request *)sreq->areq;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = count_sg(sreq->src, sreq->size);
+	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+							DMA_TO_DEVICE);
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	} else {
+		rc = _ce_setup_hash_direct(pce_dev, sreq);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->areq = areq;
+	preq_info->qce_cb = sreq->qce_cb;
+	preq_info->offload_op = QCE_OFFLOAD_NONE;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_HASHING;
+	preq_info->req_len = sreq->size;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr && cmdlistinfo) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+	rc = _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+
+	/* always ensure there is input data. ZLT does not work for bam-ndp */
+	if (!areq->nbytes) {
+		rc = _qce_sps_add_data(
+			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+	_qce_set_flag(&pce_sps_data->in_transfer,
+					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+	if (pce_dev->no_get_around) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+
+	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	if (is_dummy) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+				preq_info->src_nents, DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
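+/*
+ * OTA F8 (ciphering) request for a single packet, using the Kasumi or
+ * SNOW 3G command list. Key-stream mode (NULL data_in) is not supported.
+ */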
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	key_stream_mode = (req->data_in == NULL);
+
+	/* don't support key stream mode */
+
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	/* F8 cipher input       */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output     */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+	preq_info->ota_size = req->data_len;
+
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+	preq_info->offload_op = QCE_OFFLOAD_NONE;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = req->data_len;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+
+	rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+
+	_qce_set_flag(&pce_sps_data->in_transfer,
+			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+
+	rc = _qce_sps_add_data((uint32_t)dst, req->data_len,
+					&pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
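+/*
+ * OTA F8 request covering num_pkt packets of data_len bytes each; the same
+ * cipher setup is applied to every packet starting at cipher_start.
+ */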
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	dma_addr_t dst = 0;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	total = num_pkt * req->data_len;
+
+	/* F8 cipher input       */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output      */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+
+	preq_info->ota_size = total;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+			cipher_start, cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+	preq_info->offload_op = QCE_OFFLOAD_NONE;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = total;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+
+	rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+
+	rc = _qce_sps_add_data((uint32_t)dst, total,
+					&pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
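+/*
+ * OTA F9 (integrity) request: hash the message with the Kasumi or SNOW 3G
+ * command list and return the MAC through the result dump.
+ */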
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	req->current_req_info = req_info;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	preq_info->ota_size = req->msize;
+
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+	else
+		rc = _ce_f9_setup_direct(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+	preq_info->offload_op = QCE_OFFLOAD_NONE;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F9;
+	preq_info->req_len = req->msize;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr) {
+		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
+				cmdlistinfo, &pce_sps_data->in_transfer);
+		if (rc)
+			goto bad;
+	}
+	rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+	if (rc)
+		goto bad;
+
+	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		goto bad;
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
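+/*
+ * Parse the device-tree node: SW-fallback flags, BAM pipe pair indices for
+ * kernel and offload use cases, clock/bandwidth options, and the crypto and
+ * BAM register regions.
+ */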
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0, i = 0;
+
+	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-shared");
+	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-key");
+
+	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
+	pce_dev->use_sw_aead_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aead-algo");
+	pce_dev->use_sw_aes_xts_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-xts-algo");
+	pce_dev->use_sw_ahash_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-ahash-algo");
+	pce_dev->use_sw_hmac_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-hmac-algo");
+	pce_dev->use_sw_aes_ccm_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-ccm-algo");
+	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+	pce_dev->support_only_core_src_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+	pce_dev->request_bw_before_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,request-bw-before-clk");
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++)
+		pce_dev->ce_bam_info.pipe_pair_index[i] = 0;
+
+	pce_dev->kernel_pipes_support = true;
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE])) {
+		pr_warn("Kernel pipes not supported.\n");
+		//Unused pipe, just as failsafe.
+		pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE] = 2;
+		pce_dev->kernel_pipes_support = false;
+	}
+
+	if (of_property_read_bool((&pdev->dev)->of_node,
+					"qcom,offload-ops-support")) {
+		pce_dev->offload_pipes_support = true;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,bam-pipe-offload-cpb-hlos",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS])) {
+			pr_err("Fail to get bam offload cpb-hlos pipe pair info.\n");
+			return -EINVAL;
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,bam-pipe-offload-hlos-hlos",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS])) {
+			pr_err("Fail to get bam offload hlos-hlos info.\n");
+			return -EINVAL;
+		}
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,bam-pipe-offload-hlos-hlos-1",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS_1])) {
+			pr_info("No bam offload hlos-hlos-1 info.\n");
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,bam-pipe-offload-hlos-cpb",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB])) {
+			pr_err("Fail to get bam offload hlos-cpb info\n");
+			return -EINVAL;
+		}
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,bam-pipe-offload-hlos-cpb-1",
+		&pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB_1])) {
+			pr_info("No bam offload hlos-cpb-1 info\n");
+		}
+	}
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-device",
+				&pce_dev->ce_bam_info.ce_device)) {
+		pr_err("Fail to get CE device information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-hw-instance",
+				&pce_dev->ce_bam_info.ce_hw_instance)) {
+		pr_err("Fail to get CE hw instance information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-ee",
+				&pce_dev->ce_bam_info.bam_ee)) {
+		pr_info("BAM Apps EE is not defined, setting to default 1\n");
+		pce_dev->ce_bam_info.bam_ee = 1;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&pce_dev->ce_opp_freq_hz)) {
+		pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
+		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+	}
+
+	if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable"))
+		pce_dev->enable_s1_smmu = true;
+
+	pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node,
+					"qcom,no-clock-support");
+
+	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
+		/* Source/destination pipes for all usecases */
+		pce_dev->ce_bam_info.dest_pipe_index[i]	=
+			2 * pce_dev->ce_bam_info.pipe_pair_index[i];
+		pce_dev->ce_bam_info.src_pipe_index[i]	=
+			pce_dev->ce_bam_info.dest_pipe_index[i] + 1;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Can not map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->bam_mem = resource->start;
+		pce_dev->bam_mem_size = resource_size(resource);
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+
+	rc = platform_get_irq(pdev, 0);
+	if (rc < 0) {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		goto err_dev;
+	}
+	pce_dev->ce_bam_info.bam_irq = rc;
+	return 0;
+err_dev:
+	if (pce_dev->ce_bam_info.bam_iobase)
+		iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
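+/* Acquire the CE core source, core, interface and bus clocks, as available. */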
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	if (pce_dev->no_clock_support) {
+		pr_debug("No clock support defined in dts\n");
+		return rc;
+	}
+
+	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		if (pce_dev->request_bw_before_clk)
+			goto skip_set_rate;
+
+		rc = clk_set_rate(pce_dev->ce_core_src_clk,
+						pce_dev->ce_opp_freq_hz);
+		if (rc) {
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+			goto exit_put_core_src_clk;
+		}
+	} else {
+		if (pce_dev->support_only_core_src_clk) {
+			rc = PTR_ERR(pce_dev->ce_core_src_clk);
+			pce_dev->ce_core_src_clk = NULL;
+			pr_err("Unable to get CE core src clk\n");
+			return rc;
+		}
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+skip_set_rate:
+	if (pce_dev->support_only_core_src_clk) {
+		pce_dev->ce_core_clk = NULL;
+		pce_dev->ce_clk = NULL;
+		pce_dev->ce_bus_clk = NULL;
+	} else {
+		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+		if (IS_ERR(pce_dev->ce_core_clk)) {
+			rc = PTR_ERR(pce_dev->ce_core_clk);
+			pr_err("Unable to get CE core clk\n");
+			goto exit_put_core_src_clk;
+		}
+		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+		if (IS_ERR(pce_dev->ce_clk)) {
+			rc = PTR_ERR(pce_dev->ce_clk);
+			pr_err("Unable to get CE interface clk\n");
+			goto exit_put_core_clk;
+		}
+
+		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+		if (IS_ERR(pce_dev->ce_bus_clk)) {
+			rc = PTR_ERR(pce_dev->ce_bus_clk);
+			pr_err("Unable to get CE BUS interface clk\n");
+			goto exit_put_iface_clk;
+		}
+	}
+	return rc;
+
+exit_put_iface_clk:
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+	pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+	if (pce_dev->no_clock_support) {
+		pr_debug("No clock support defined in dts\n");
+		return;
+	}
+
+	if (pce_dev->ce_bus_clk)
+		clk_put(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+}
+
+int qce_enable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	int rc = 0;
+
+	if (pce_dev->no_clock_support) {
+		pr_debug("No clock support defined in dts\n");
+		return rc;
+	}
+
+	if (pce_dev->ce_core_src_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core src clk\n");
+			return rc;
+		}
+	}
+
+	if (pce_dev->support_only_core_src_clk)
+		return rc;
+
+	if (pce_dev->ce_core_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto exit_disable_core_src_clk;
+		}
+	}
+
+	if (pce_dev->ce_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto exit_disable_core_clk;
+		}
+	}
+
+	if (pce_dev->ce_bus_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE BUS clk\n");
+			goto exit_disable_ce_clk;
+		}
+	}
+	return rc;
+
+exit_disable_ce_clk:
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+	return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (pce_dev->no_clock_support) {
+		pr_debug("No clock support defined in dts\n");
+		return 0;
+	}
+
+	if (pce_dev->ce_bus_clk)
+		clk_disable_unprepare(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/* dummy req setup */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+	char *input =
+	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+	int len = DUMMY_REQ_DATA_LEN;
+
+	memcpy(pce_dev->dummyreq_in_buf, input, len);
+	sg_init_one(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
+
+	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+	pce_dev->dummyreq.sreq.auth_data[0] = 0;
+	pce_dev->dummyreq.sreq.auth_data[1] = 0;
+	pce_dev->dummyreq.sreq.auth_data[2] = 0;
+	pce_dev->dummyreq.sreq.auth_data[3] = 0;
+	pce_dev->dummyreq.sreq.first_blk = true;
+	pce_dev->dummyreq.sreq.last_blk = true;
+	pce_dev->dummyreq.sreq.size = len;
+	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+	pce_dev->dummyreq.sreq.flags = 0;
+	pce_dev->dummyreq.sreq.authkey = NULL;
+
+	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+	return 0;
+}
+
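+/* Set DMA segment size/boundary limits when stage-1 SMMU translation is on. */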
+static int qce_smmu_init(struct qce_device *pce_dev)
+{
+	struct device *dev = pce_dev->pdev;
+
+	if (!dev->dma_parms) {
+		dev->dma_parms = devm_kzalloc(dev,
+			sizeof(*dev->dma_parms), GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
+	return 0;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	int i;
+	static int pcedev_no = 1;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	mutex_lock(&qce_iomap_mutex);
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
+	if (pce_dev->enable_s1_smmu) {
+		if (qce_smmu_init(pce_dev)) {
+			*rc = -EIO;
+			goto err_pce_dev;
+		}
+	}
+
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+		atomic_set(&pce_dev->ce_request_info[i].in_use, false);
+	pce_dev->ce_request_index = 0;
+
+	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+						MAX_QCE_ALLOC_BAM_REQ * 2;
+	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+	if (pce_dev->iovec_vmem == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
+	if (pce_dev->dummyreq_in_buf == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+	*rc = qce_enable_clk(pce_dev);
+	if (*rc)
+		goto err_enable_clk;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+
+	qce_init_ce_cfg_val(pce_dev);
+	*rc  = qce_sps_init(pce_dev);
+	if (*rc)
+		goto err;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_disable_clk(pce_dev);
+	setup_dummy_req(pce_dev);
+	atomic_set(&pce_dev->no_of_queued_req, 0);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	timer_setup(&(pce_dev->timer), qce_multireq_timeout, 0);
+	//pce_dev->timer.function = qce_multireq_timeout;
+	//pce_dev->timer.data = (unsigned long)pce_dev;
+	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+	pce_dev->intr_cadence = 0;
+	pce_dev->dev_no = pcedev_no;
+	pcedev_no++;
+	pce_dev->owner = QCE_OWNER_NONE;
+	qce_enable_clock_gating(pce_dev);
+	mutex_unlock(&qce_iomap_mutex);
+	return pce_dev;
+err:
+	qce_disable_clk(pce_dev);
+
+err_enable_clk:
+	__qce_deinit_clk(pce_dev);
+
+err_mem:
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	mutex_lock(&qce_iomap_mutex);
+	qce_enable_clk(pce_dev);
+	qce_sps_exit(pce_dev);
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+
+	qce_disable_clk(pce_dev);
+	__qce_deinit_clk(pce_dev);
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+				1 << CRYPTO_ENCR_KASUMI_SEL |\
+				1 << CRYPTO_AUTH_SNOW3G_SEL |\
+				1 << CRYPTO_AUTH_KASUMI_SEL)
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+		ce_support->ota = true;
+	else
+		ce_support->ota = false;
+	ce_support->bam = true;
+	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+	ce_support->hw_key = pce_dev->support_hw_key;
+	ce_support->aes_ccm = true;
+	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
+	if (pce_dev->ce_bam_info.minor_version)
+		ce_support->aligned_only = false;
+	else
+		ce_support->aligned_only = true;
+
+	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+	ce_support->use_sw_aead_algo =
+				pce_dev->use_sw_aead_algo;
+	ce_support->use_sw_aes_xts_algo =
+				pce_dev->use_sw_aes_xts_algo;
+	ce_support->use_sw_ahash_algo =
+				pce_dev->use_sw_ahash_algo;
+	ce_support->use_sw_hmac_algo =
+				pce_dev->use_sw_hmac_algo;
+	ce_support->use_sw_aes_ccm_algo =
+				pce_dev->use_sw_aes_ccm_algo;
+	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+	if (pce_dev->no_get_around)
+		ce_support->max_request = MAX_QCE_BAM_REQ;
+	else
+		ce_support->max_request = 1;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+	int i;
+	bool req_in_use;
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+		pr_info("%s: %d %d\n", __func__, i, req_in_use);
+		if (req_in_use)
+			_qce_dump_descr_fifos(pce_dev, i);
+	}
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");

+ 256 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qce50.h

@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include "linux/msm-sps.h"
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+
+#define GET_VIRT_ADDR(x)  \
+		((uintptr_t)pce_dev->coh_vmem +			\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem +	\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (32 * 1024  - 64)
+
+/* default bam ipc log level */
+#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+	QCE_XFER_HASHING,
+	QCE_XFER_CIPHERING,
+	QCE_XFER_AEAD,
+	QCE_XFER_F8,
+	QCE_XFER_F9,
+	QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE Result DUMP format*/
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	__be32 status;
+	__be32 status2;
+};
+
+struct qce_cmdlist_info {
+
+	unsigned long cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *crypto_cfg_le;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask_0;
+	struct sps_command_element *encr_mask_1;
+	struct sps_command_element *encr_mask_2;
+	struct sps_command_element *encr_mask_3;
+	struct sps_command_element *encr_xts_du_size;
+	struct sps_command_element *pattern_info;
+	struct sps_command_element *block_offset;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info cipher_null;
+	struct qce_cmdlist_info f8_kasumi;
+	struct qce_cmdlist_info f8_snow3g;
+	struct qce_cmdlist_info f9_kasumi;
+	struct qce_cmdlist_info f9_snow3g;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+	uint32_t crypto_cfg_be;
+	uint32_t crypto_cfg_le;
+
+	uint32_t encr_cfg_aes_cbc_128;
+	uint32_t encr_cfg_aes_cbc_256;
+
+	uint32_t encr_cfg_aes_ecb_128;
+	uint32_t encr_cfg_aes_ecb_256;
+
+	uint32_t encr_cfg_aes_xts_128;
+	uint32_t encr_cfg_aes_xts_256;
+
+	uint32_t encr_cfg_aes_ctr_128;
+	uint32_t encr_cfg_aes_ctr_256;
+
+	uint32_t encr_cfg_aes_ccm_128;
+	uint32_t encr_cfg_aes_ccm_256;
+
+	uint32_t encr_cfg_des_cbc;
+	uint32_t encr_cfg_des_ecb;
+
+	uint32_t encr_cfg_3des_cbc;
+	uint32_t encr_cfg_3des_ecb;
+	uint32_t encr_cfg_kasumi;
+	uint32_t encr_cfg_snow3g;
+
+	uint32_t auth_cfg_cmac_128;
+	uint32_t auth_cfg_cmac_256;
+
+	uint32_t auth_cfg_sha1;
+	uint32_t auth_cfg_sha256;
+
+	uint32_t auth_cfg_hmac_sha1;
+	uint32_t auth_cfg_hmac_sha256;
+
+	uint32_t auth_cfg_aes_ccm_128;
+	uint32_t auth_cfg_aes_ccm_256;
+	uint32_t auth_cfg_aead_sha1_hmac;
+	uint32_t auth_cfg_aead_sha256_hmac;
+	uint32_t auth_cfg_kasumi;
+	uint32_t auth_cfg_snow3g;
+
+	/* iv0 - bits 127:96 - CRYPTO_CNTR_MASK_REG0*/
+	uint32_t encr_cntr_mask_0;
+	/* iv1 - bits 95:64 - CRYPTO_CNTR_MASK_REG1*/
+	uint32_t encr_cntr_mask_1;
+	/* iv2 - bits 63:32 - CRYPTO_CNTR_MASK_REG2*/
+	uint32_t encr_cntr_mask_2;
+	/* iv3 - bits 31:0 - CRYPTO_CNTR_MASK_REG*/
+	uint32_t encr_cntr_mask_3;
+};
+
+struct ce_bam_info {
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+	uint32_t			ce_device;
+	uint32_t			ce_hw_instance;
+	uint32_t			bam_ee;
+	unsigned int			pipe_pair_index[QCE_OFFLOAD_OPER_LAST];
+	unsigned int			src_pipe_index[QCE_OFFLOAD_OPER_LAST];
+	unsigned int			dest_pipe_index[QCE_OFFLOAD_OPER_LAST];
+	unsigned long			bam_handle;
+	int				ce_burst_size;
+	uint32_t			minor_version;
+	uint32_t			major_version;
+	struct qce_sps_ep_conn_data	producer[QCE_OFFLOAD_OPER_LAST];
+	struct qce_sps_ep_conn_data	consumer[QCE_OFFLOAD_OPER_LAST];
+};
+
+/* SPS data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump; /* result dump virtual address */
+	uint32_t result_dump_null;
+	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+	uint32_t result_dump_null_phy;
+
+	uint32_t ignore_buffer; /* ignore buffer virtual address */
+	struct ce_result_dump_format *result; /* pointer to result dump */
+	struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+	atomic_t in_use;
+	bool in_prog;
+	enum qce_xfer_type_enum	xfer_type;
+	struct ce_sps_data ce_sps;
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+	void *user;
+	void *areq;
+	int assoc_nents;
+	struct scatterlist *asg;        /* Formatted associated data sg  */
+	int src_nents;
+	int dst_nents;
+	dma_addr_t phy_iv_in;
+	unsigned char dec_iv[16];
+	int dir;
+	enum qce_cipher_mode_enum mode;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	unsigned int req_len;
+	unsigned int offload_op;
+};
+
+struct qce_driver_stats {
+	int no_of_timeouts;
+	int no_of_dummy_reqs;
+	int current_mode;
+	int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */

+ 22 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qce_ota.h

@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QTI Crypto Engine driver OTA API
+ *
+ * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include "linux/qcota.h"
+
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */

+ 2887 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcedev.c

@@ -0,0 +1,2887 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI CE device driver.
+ *
+ * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/mman.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include "linux/qcedev.h"
+#include <linux/interconnect.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+#include "qcedev_smmu.h"
+#include "qcom_crypto_device.h"
+
+#define CACHE_LINE_SIZE 64
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024)
+/*
+ * Max wait time once a crypto request is submitted.
+ */
+#define MAX_CRYPTO_WAIT_TIME 1500
+/*
+ * Max wait time once an offload crypto request is submitted.
+ * This is low due to expected timeout and key pause errors.
+ * This is temporary, and we can use the 1500 value once the
+ * core irqs are enabled.
+ */
+#define MAX_OFFLOAD_CRYPTO_WAIT_TIME 50
+
+#define MAX_REQUEST_TIME 5000
+
+enum qcedev_req_status {
+	QCEDEV_REQ_CURRENT = 0,
+	QCEDEV_REQ_WAITING = 1,
+	QCEDEV_REQ_SUBMITTED = 2,
+	QCEDEV_REQ_DONE = 3,
+};
+
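+/* standard initialization vector for SHA-1, source: FIPS 180-2 */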
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+#define QCEDEV_CTX_KEY_MASK		0x000000ff
+#define QCEDEV_CTX_USE_HW_KEY		0x00000001
+#define QCEDEV_CTX_USE_PIPE_KEY		0x00000002
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+static DEFINE_MUTEX(hash_access_lock);
+
+static dev_t qcedev_device_no;
+static struct class *driver_class;
+static struct device *class_dev;
+
+static const struct of_device_id qcedev_match[] = {
+	{	.compatible = "qcom,qcedev"},
+	{	.compatible = "qcom,qcedev,context-bank"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcedev_match);
+
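+/*
+ * Enable or disable the CE clocks and the interconnect bandwidth vote.
+ * The ordering (bandwidth first vs. clock first) follows the
+ * req_bw_before_clk capability reported by the crypto engine.
+ */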
+static int qcedev_control_clocks(struct qcedev_control *podev, bool enable)
+{
+	unsigned int control_flag;
+	int ret = 0;
+
+	if (podev->ce_support.req_bw_before_clk) {
+		if (enable)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (enable)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			return ret;
+		}
+		ret = icc_set_bw(podev->icc_path,
+				podev->icc_avg_bw, podev->icc_peak_bw);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable disable clk\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = icc_set_bw(podev->icc_path,
+				podev->icc_avg_bw, podev->icc_peak_bw);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return ret;
+		}
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			ret = icc_set_bw(podev->icc_path, 0, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return ret;
+		}
+		ret = icc_set_bw(podev->icc_path, 0, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n", __func__);
+			return ret;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = icc_set_bw(podev->icc_path, 0, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return ret;
+		}
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			ret = icc_set_bw(podev->icc_path,
+					podev->icc_avg_bw, podev->icc_peak_bw);
+			if (ret)
+				pr_err("%s Unable to set high bw\n", __func__);
+			return ret;
+		}
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+							bool high_bw_req)
+{
+	int ret = 0;
+
+	if (podev == NULL)
+		return;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (high_bw_req) {
+		if (podev->high_bw_req_count == 0) {
+			ret = qcedev_control_clocks(podev, true);
+			if (ret)
+				goto exit_unlock_mutex;
+			ret = qce_set_irqs(podev->qce, true);
+			if (ret) {
+				pr_err("%s: could not enable bam irqs, ret = %d",
+						__func__, ret);
+				qcedev_control_clocks(podev, false);
+				goto exit_unlock_mutex;
+			}
+		}
+		podev->high_bw_req_count++;
+	} else {
+		if (podev->high_bw_req_count == 1) {
+			ret = qce_set_irqs(podev->qce, false);
+			if (ret) {
+				pr_err("%s: could not disable bam irqs, ret = %d",
+						__func__, ret);
+				goto exit_unlock_mutex;
+			}
+			ret = qcedev_control_clocks(podev, false);
+			if (ret)
+				goto exit_unlock_mutex;
+		}
+		podev->high_bw_req_count--;
+	}
+
+exit_unlock_mutex:
+	mutex_unlock(&qcedev_sent_bw_req);
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev,
+			    int *current_req_info);
+static int start_offload_cipher_req(struct qcedev_control *podev,
+				int *current_req_info);
+static int start_sha_req(struct qcedev_control *podev,
+			 int *current_req_info);
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
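+/* Map a character device minor number back to its qcedev_control instance. */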
+static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+					MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	handle->cntl = podev;
+	file->private_data = handle;
+
+	qcedev_ce_high_bw_req(podev, true);
+
+	mutex_init(&handle->registeredbufs.lock);
+	INIT_LIST_HEAD(&handle->registeredbufs.list);
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+	struct qcedev_handle *handle;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+					__func__, podev);
+	}
+
+	if (podev)
+		qcedev_ce_high_bw_req(podev, false);
+
+	if (qcedev_unmap_all_buffers(handle))
+		pr_err("%s: failed to unmap all ion buffers\n", __func__);
+
+	kfree_sensitive(handle);
+	file->private_data = NULL;
+	return 0;
+}
+
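+/*
+ * Tasklet handler run when the crypto engine completes a request: signal the
+ * waiting caller and wake up the next request queued on ready_commands.
+ */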
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+	if (areq) {
+		areq->state = QCEDEV_REQ_DONE;
+		if (!areq->timed_out)
+			complete(&areq->complete);
+	}
+
+	/* Look through queued requests and wake up the corresponding thread */
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		new_req->state = QCEDEV_REQ_CURRENT;
+		wake_up_interruptible(&new_req->wait_q);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_handle *handle;
+
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	if (!areq || !areq->cookie)
+		return;
+	handle = (struct qcedev_handle *) areq->cookie;
+	pdev = handle->cntl;
+	if (!pdev)
+		return;
+
+	if (digest)
+		memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+	if (authdata) {
+		handle->sha_ctxt.auth_data[0] = auth32[0];
+		handle->sha_ctxt.auth_data[1] = auth32[1];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	if (!areq || !areq->cookie)
+		return;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	if (!podev)
+		return;
+	qcedev_areq = podev->active_command;
+
+	if (iv && qcedev_areq)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&podev->done_tasklet);
+}
+
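+/*
+ * Translate the userspace cipher request held in podev->active_command into
+ * a qce_req and hand it to the crypto engine.
+ */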
+static int start_cipher_req(struct qcedev_control *podev,
+			    int *current_req_info)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	memset(&creq, 0, sizeof(creq));
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto unsupported;
+	}
+	creq.pmem = NULL;
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+	creq.iv_ctr_size = 0;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+	creq.flags = 0;
+	creq.offload_op = QCE_OFFLOAD_NONE;
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+	*current_req_info = creq.current_req_info;
+unsupported:
+	qcedev_areq->err = ret ? -ENXIO : 0;
+
+	return ret;
+}
+
+void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv,
+			      unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	if (!areq || !areq->cookie)
+		return;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	if (!podev)
+		return;
+	qcedev_areq = podev->active_command;
+
+	if (iv && qcedev_areq)
+		memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv,
+			qcedev_areq->offload_cipher_op_req.ivlen);
+
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_offload_cipher_req(struct qcedev_control *podev,
+				int *current_req_info)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	u8 patt_sz = 0, proc_data_sz = 0;
+	int ret = 0;
+
+	memset(&creq, 0, sizeof(creq));
+	/* Start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+
+	switch (qcedev_areq->offload_cipher_op_req.alg) {
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->offload_cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->offload_cipher_op_req.is_copy_op ||
+	    qcedev_areq->offload_cipher_op_req.encrypt) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		switch (qcedev_areq->offload_cipher_op_req.op) {
+		case QCEDEV_OFFLOAD_HLOS_HLOS:
+		case QCEDEV_OFFLOAD_HLOS_HLOS_1:
+		case QCEDEV_OFFLOAD_HLOS_CPB:
+		case QCEDEV_OFFLOAD_HLOS_CPB_1:
+			creq.dir = QCE_DECRYPT;
+			break;
+		case QCEDEV_OFFLOAD_CPB_HLOS:
+			creq.dir = QCE_ENCRYPT;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen;
+	creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size;
+
+	creq.encklen = qcedev_areq->offload_cipher_op_req.encklen;
+
+	/* OFFLOAD use cases use PIPE keys so no need to set keys */
+	creq.flags = QCEDEV_CTX_USE_PIPE_KEY;
+	creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+	creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op;
+	if (qcedev_areq->offload_cipher_op_req.is_copy_op)
+		creq.is_copy_op = true;
+
+	creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len;
+
+	creq.qce_cb = qcedev_offload_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+
+	patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz;
+	proc_data_sz =
+		qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz;
+	creq.is_pattern_valid =
+		qcedev_areq->offload_cipher_op_req.is_pattern_valid;
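+	/*
+	 * Pack the pattern descriptor into pattern_info: bit 0 enables
+	 * pattern processing, bits 4-7 hold (pattern size - 1), bits 8-11
+	 * hold (processed data size - 1) and bits 12 and up hold the
+	 * pattern offset.
+	 */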
+	if (creq.is_pattern_valid) {
+		creq.pattern_info = 0x1;
+		if (patt_sz)
+			creq.pattern_info |= (patt_sz - 1) << 4;
+		if (proc_data_sz)
+			creq.pattern_info |= (proc_data_sz - 1) << 8;
+		creq.pattern_info |=
+		qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12;
+	}
+	creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset;
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+
+	*current_req_info = creq.current_req_info;
+	qcedev_areq->err = ret ? -ENXIO : 0;
+
+	return ret;
+}
+
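+/*
+ * Set up a qce_sha_req for the active hash/HMAC/CMAC command and submit it
+ * to the crypto engine.
+ */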
+static int start_sha_req(struct qcedev_control *podev,
+			 int *current_req_info)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+	struct qcedev_handle *handle;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	handle = qcedev_areq->handle;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &handle->sha_ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n",
+			qcedev_areq->sha_op_req.alg);
+		return -EINVAL;
+	}
+
+	qcedev_areq->sha_req.cookie = handle;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+		sreq.digest = &handle->sha_ctxt.digest[0];
+		sreq.first_blk = handle->sha_ctxt.first_blk;
+		sreq.last_blk = handle->sha_ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	sreq.flags = 0;
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	*current_req_info = sreq.current_req_info;
+	qcedev_areq->err = ret ? -ENXIO : 0;
+
+	return ret;
+}
+
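+/*
+ * Query the engine error status after a timeout and translate it into the
+ * offload error code reported back to userspace.
+ */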
+static void qcedev_check_crypto_status(
+			struct qcedev_async_req *qcedev_areq, void *handle)
+{
+	struct qce_error error = {0};
+
+	qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
+	qce_get_crypto_status(handle, &error);
+
+	if (error.timer_error) {
+		qcedev_areq->offload_cipher_op_req.err =
+			QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR;
+	} else if (error.key_paused) {
+		qcedev_areq->offload_cipher_op_req.err =
+			QCEDEV_OFFLOAD_KEY_PAUSE_ERROR;
+	} else if (error.generic_error) {
+		qcedev_areq->offload_cipher_op_req.err =
+			QCEDEV_OFFLOAD_GENERIC_ERROR;
+	}
+
+	return;
+}
+
+#define MAX_RETRIES	333
+
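+/*
+ * Queue a request on the device, wait for the engine callback (or a timeout)
+ * and update the success/failure statistics. Only one request is serviced at
+ * a time; the others wait on the ready_commands list.
+ */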
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle)
+{
+	struct qcedev_control *podev;
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+	int current_req_info = 0;
+	int wait = MAX_CRYPTO_WAIT_TIME;
+	struct qcedev_async_req *new_req = NULL;
+	int retries = 0;
+	int req_wait = MAX_REQUEST_TIME;
+	unsigned int crypto_wait = 0;
+
+	qcedev_areq->err = 0;
+	podev = handle->cntl;
+	init_waitqueue_head(&qcedev_areq->wait_q);
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	/*
+	 * Service only one crypto request at a time.
+	 * Any other new requests are queued in ready_commands and woken up
+	 * only when the active command has finished successfully or when the
+	 * request times out or when the command failed when setting up.
+	 */
+	do {
+		if (podev->active_command == NULL) {
+			podev->active_command = qcedev_areq;
+			qcedev_areq->state = QCEDEV_REQ_SUBMITTED;
+			switch (qcedev_areq->op_type) {
+			case QCEDEV_CRYPTO_OPER_CIPHER:
+				ret = start_cipher_req(podev,
+						&current_req_info);
+				crypto_wait = MAX_CRYPTO_WAIT_TIME;
+				break;
+			case QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER:
+				ret = start_offload_cipher_req(podev,
+						&current_req_info);
+				crypto_wait = MAX_OFFLOAD_CRYPTO_WAIT_TIME;
+				break;
+			default:
+				crypto_wait = MAX_CRYPTO_WAIT_TIME;
+
+				ret = start_sha_req(podev,
+						&current_req_info);
+				break;
+			}
+		} else {
+			list_add_tail(&qcedev_areq->list,
+					&podev->ready_commands);
+			qcedev_areq->state = QCEDEV_REQ_WAITING;
+			req_wait = wait_event_interruptible_lock_irq_timeout(
+				qcedev_areq->wait_q,
+				(qcedev_areq->state == QCEDEV_REQ_CURRENT),
+				podev->lock,
+				msecs_to_jiffies(MAX_REQUEST_TIME));
+			if ((req_wait == 0) || (req_wait == -ERESTARTSYS)) {
+				pr_err("%s: request timed out, req_wait = %d\n",
+						__func__, req_wait);
+				list_del(&qcedev_areq->list);
+				podev->active_command = NULL;
+				spin_unlock_irqrestore(&podev->lock, flags);
+				return qcedev_areq->err;
+			}
+		}
+	} while (qcedev_areq->state != QCEDEV_REQ_SUBMITTED);
+
+	if (ret != 0) {
+		podev->active_command = NULL;
+		/*
+		 * Look through queued requests and wake up the corresponding
+		 * thread.
+		 */
+		if (!list_empty(&podev->ready_commands)) {
+			new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+			list_del(&new_req->list);
+			new_req->state = QCEDEV_REQ_CURRENT;
+			wake_up_interruptible(&new_req->wait_q);
+		}
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	qcedev_areq->timed_out = false;
+	if (ret == 0)
+		wait = wait_for_completion_timeout(&qcedev_areq->complete,
+				msecs_to_jiffies(crypto_wait));
+
+	if (!wait) {
+		/*
+		 * The wait timed out and the callback routine was not
+		 * exercised. The callback sequence does some housekeeping
+		 * that would be missed here, so call into qce below to do
+		 * that cleanup.
+		 */
+		pr_err("%s: wait timed out, req info = %d\n", __func__,
+					current_req_info);
+		spin_lock_irqsave(&podev->lock, flags);
+		qcedev_areq->timed_out = true;
+		spin_unlock_irqrestore(&podev->lock, flags);
+		qcedev_check_crypto_status(qcedev_areq, podev->qce);
+		if (qcedev_areq->offload_cipher_op_req.err ==
+			QCEDEV_OFFLOAD_NO_ERROR) {
+			pr_err("%s: no error, wait for request to be done\n", __func__);
+			while (qcedev_areq->state != QCEDEV_REQ_DONE &&
+				retries < MAX_RETRIES) {
+				usleep_range(3000, 5000);
+				retries++;
+				pr_err("%s: waiting for req state to be done, retries = %d\n",
+					__func__, retries);
+			}
+			return 0;
+		}
+		ret = qce_manage_timeout(podev->qce, current_req_info);
+		if (ret)
+			pr_err("%s: error during manage timeout\n", __func__);
+
+		req_done((unsigned long) podev);
+		if (qcedev_areq->offload_cipher_op_req.err !=
+						QCEDEV_OFFLOAD_NO_ERROR)
+			return 0;
+	}
+
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat;
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		}
+	} else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) {
+		//Do nothing
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	sha_ctxt->init_done = true;
+	return 0;
+}
+
+
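+/*
+ * Hash one chunk of user data (at most QCE_MAX_OPER_DATA bytes): copy the
+ * user buffers into a cache-line aligned kernel buffer, keep any partial
+ * block in trailing_buf for the next update, and submit the rest.
+ */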
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint32_t buf_size = 0;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src = qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		handle->sha_ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+	buf_size = total + CACHE_LINE_SIZE * 2;
+	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		memset(k_buf_src, 0, buf_size);
+		kfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			memset(k_buf_src, 0, buf_size);
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+		memcpy(&handle->sha_ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	memset(k_buf_src, 0, buf_size);
+	kfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	if (!handle->sha_ctxt.init_done) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct	qcedev_sha_op_req *saved_req;
+		struct	qcedev_sha_op_req req;
+		struct	qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+		uint32_t req_size = 0;
+
+		req_size = sizeof(struct qcedev_sha_op_req);
+		/* save the original req structure */
+		saved_req =
+			kmalloc(req_size, GFP_KERNEL);
+		if (saved_req == NULL) {
+			pr_err("%s: Can't allocate mem: saved_req 0x%lx\n",
+						__func__, (uintptr_t)saved_req);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(*sreq));
+		memcpy(saved_req, sreq, sizeof(*sreq));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		memset(saved_req, 0, req_size);
+		kfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+	return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+	uint8_t *k_buf_src = NULL;
+	uint32_t buf_size = 0;
+	uint8_t *k_align_src = NULL;
+
+	if (!handle->sha_ctxt.init_done) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	handle->sha_ctxt.last_blk = 1;
+
+	total = handle->sha_ctxt.trailing_buf_len;
+
+	buf_size = total + CACHE_LINE_SIZE * 2;
+	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+						CACHE_LINE_SIZE);
+	memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+
+	sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.first_blk = 0;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+	handle->sha_ctxt.trailing_buf_len = 0;
+	handle->sha_ctxt.init_done = false;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+	memset(k_buf_src, 0, buf_size);
+	kfree(k_buf_src);
+	qcedev_areq->sha_req.sreq.src = NULL;
+	return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint32_t buf_size = 0;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) &&
+		(qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) {
+		pr_err("%s: unsupported key length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+	if (total > U32_MAX - CACHE_LINE_SIZE * 2)
+		return -EINVAL;
+
+	buf_size = total + CACHE_LINE_SIZE * 2;
+	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	user_src = qcedev_areq->sha_op_req.data[0].vaddr;
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			memset(k_buf_src, 0, buf_size);
+			kfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, handle);
+
+	memset(k_buf_src, 0, buf_size);
+	kfree(k_buf_src);
+	return err;
+}
+
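+/*
+ * Load the HMAC authentication key: copy it directly when it is no longer
+ * than QCEDEV_MAX_KEY_SIZE, otherwise hash it down first and use the digest
+ * as the key (standard HMAC long-key handling).
+ */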
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		qcedev_sha_init(areq, handle);
+		if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+		uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		authkey_areq.handle = handle;
+
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, handle);
+		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+		if (!err)
+			err = qcedev_sha_final(&authkey_areq, handle);
+		else
+			return err;
+		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		qcedev_sha_init(areq, handle);
+
+		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+				handle->sha_ctxt.diglen);
+	}
+	return err;
+}
+
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL)
+		return -ENOMEM;
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+			handle->sha_ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+					sha_digest_size);
+	handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+	handle->sha_ctxt.first_blk = 1;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+	memset(k_src, 0, sha_block_size);
+	kfree(k_src);
+	qcedev_areq->sha_req.sreq.src = NULL;
+	return err;
+}
+
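+/*
+ * Prepare the HMAC inner (0x36) or outer (0x5c) padded key block in
+ * trailing_buf for targets without hardware HMAC support.
+ */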
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		handle->sha_ctxt.trailing_buf[i] =
+				(handle->sha_ctxt.authkey[i] ^ constant);
+
+	handle->sha_ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, handle, true);
+	return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_sha_final(areq, handle);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, handle, false);
+	err = qcedev_hmac_get_ohash(areq, handle);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, handle);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, handle);
+	else
+		return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, handle);
+	else
+		return qcedev_hmac_final(areq, handle);
+}
+
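+/*
+ * Run one cipher transfer over a virtual-address request: copy the user
+ * source buffers into the aligned kernel bounce buffer, encrypt/decrypt in
+ * place, then copy the result back out to the user destination buffers.
+ */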
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_handle *handle,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src = areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_init_one(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+
+	areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.iv = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, handle);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len)) {
+				err = -EFAULT;
+				goto exit;
+			}
+
+			k_align_dst += creq->vbuf.dst[dst_i].len;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+				creq->data_len)) {
+				err = -EFAULT;
+				goto exit;
+			}
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+exit:
+	areq->cipher_req.creq.src = NULL;
+	areq->cipher_req.creq.dst = NULL;
+	return err;
+}
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint32_t buf_size = 0;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	uint32_t req_size = 0;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	total = 0;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	buf_size = QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2;
+	k_buf_src = kmalloc(buf_size, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+	req_size = sizeof(struct qcedev_cipher_op_req);
+	saved_req = kmemdup(creq, req_size, GFP_KERNEL);
+	if (saved_req == NULL) {
+		memset(k_buf_src, 0, buf_size);
+		kfree(k_buf_src);
+		return -ENOMEM;
+
+	}
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len =	max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					memset(saved_req, 0, req_size);
+					memset(k_buf_src, 0, buf_size);
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					memset(saved_req, 0, req_size);
+					memset(k_buf_src, 0, buf_size);
+					kfree(k_buf_src);
+					kfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	memset(saved_req, 0, req_size);
+	memset(k_buf_src, 0, buf_size);
+	kfree(saved_req);
+	kfree(k_buf_src);
+	return err;
+
+}
+
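+/*
+ * Offload (pipe-key) cipher path: the vbuf entries already carry addresses
+ * the crypto engine can consume, so walk each entry and submit it in
+ * MAX_CEHW_REQ_TRANSFER_SIZE chunks through single-entry scatterlists whose
+ * dma_address fields point at those buffers.
+ */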
+static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
+				       struct qcedev_handle *handle)
+{
+	int i = 0;
+	int err = 0;
+	size_t byteoffset = 0;
+	size_t transfer_data_len = 0;
+	size_t pending_data_len = 0;
+	size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset;
+	uint8_t *user_src = NULL;
+	uint8_t *user_dst = NULL;
+	struct scatterlist sg_src;
+	struct scatterlist sg_dst;
+
+	if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->offload_cipher_op_req.byteoffset;
+
+	/*
+	 * areq has two components:
+	 *      a) Request that comes from userspace i.e. offload_cipher_op_req
+	 *      b) Request that QCE understands - skcipher i.e. cipher_req.creq
+	 *         skcipher has sglist pointers src and dest that would carry
+	 *         data to/from CE.
+	 */
+	areq->cipher_req.creq.src = &sg_src;
+	areq->cipher_req.creq.dst = &sg_dst;
+	sg_init_table(&sg_src, 1);
+	sg_init_table(&sg_dst, 1);
+
+	for (i = 0; i < areq->offload_cipher_op_req.entries; i++) {
+		transfer_data_len = 0;
+		pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len;
+		user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr;
+		user_src += byteoffset;
+
+		user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr;
+		user_dst += byteoffset;
+
+		areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv;
+
+		while (pending_data_len) {
+			transfer_data_len = min(max_data_xfer,
+						pending_data_len);
+			sg_src.dma_address = (dma_addr_t)user_src;
+			sg_dst.dma_address = (dma_addr_t)user_dst;
+			areq->cipher_req.creq.cryptlen = transfer_data_len;
+
+			sg_src.length = transfer_data_len;
+			sg_dst.length = transfer_data_len;
+
+			err = submit_req(areq, handle);
+			if (err) {
+				pr_err("%s: Error processing req, err = %d\n",
+						__func__, err);
+				goto exit;
+			}
+			/* update data len to be processed */
+			pending_data_len -= transfer_data_len;
+			user_src += transfer_data_len;
+			user_dst += transfer_data_len;
+		}
+	}
+exit:
+	areq->cipher_req.creq.src = NULL;
+	areq->cipher_req.creq.dst = NULL;
+	return err;
+}
+
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	/* if intending to use HW key make sure key fields are set
+	 * correctly and HW key is indeed supported in target
+	 */
+	if (req->encklen == 0) {
+		int i;
+
+		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+			if (req->enckey[i]) {
+				pr_err("%s: Invalid key: non-zero key input\n",
+								__func__);
+				goto error;
+			}
+		}
+		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+			(req->op != QCEDEV_OPER_DEC_NO_KEY))
+			if (!podev->platform_support.hw_key_support) {
+				pr_err("%s: Invalid op %d\n", __func__,
+						(uint32_t)req->op);
+				goto error;
+			}
+	} else {
+		if (req->encklen == QCEDEV_AES_KEY_192) {
+			if (!podev->ce_support.aes_key_192) {
+				pr_err("%s: AES-192 not supported\n", __func__);
+				goto error;
+			}
+		} else {
+			/* if not using HW key make sure key
+			 * length is valid
+			 */
+			if (req->mode == QCEDEV_AES_MODE_XTS) {
+				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+				(req->encklen != QCEDEV_AES_KEY_256*2)) {
+					pr_err("%s: unsupported key size: %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			} else {
+				if ((req->encklen != QCEDEV_AES_KEY_128) &&
+					(req->encklen != QCEDEV_AES_KEY_256)) {
+					pr_err("%s: unsupported key size %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			}
+		}
+	}
+	return 0;
+error:
+	return -EINVAL;
+}
+
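+/*
+ * Validate a userspace cipher request: algorithm/mode support, key and IV
+ * lengths, byte-offset rules, and that the source and destination buffer
+ * lists add up to data_len without integer overflow.
+ */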
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if (req->use_pmem) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto error;
+	}
+	if ((req->entries == 0) || (req->data_len == 0) ||
+			(req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+						(uint32_t)req->alg);
+		goto error;
+	}
+	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+				(!podev->ce_support.aes_xts)) {
+		pr_err("%s: XTS algorithm is not supported\n", __func__);
+		goto error;
+	}
+	if (req->alg == QCEDEV_ALG_AES) {
+		if (qcedev_check_cipher_key(req, podev))
+			goto error;
+
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+								 __func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s:Integer overflow on total src len\n",
+					__func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+				__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen > 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a zero length IV\n", __func__);
+			goto error;
+		}
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero length IV\n", __func__);
+			goto error;
+		}
+	}
+	/* Check for sum of all dst length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req dst vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+			__func__, i, total, req->data_len);
+		goto error;
+	}
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req src vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto error;
+	}
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
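+/*
+ * Validate a userspace hash/HMAC/CMAC request: algorithm support, key
+ * presence and length, and that the per-entry lengths add up to data_len.
+ */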
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac)) {
+		pr_err("%s: CMAC not supported\n", __func__);
+		goto sha_error;
+	}
+	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid num entries (%d)\n",
+						__func__, req->entries);
+		goto sha_error;
+	}
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+		goto sha_error;
+	}
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+		if (req->authkey == NULL) {
+			pr_err("%s: Invalid authkey pointer\n", __func__);
+			goto sha_error;
+		}
+		if (req->authklen <= 0) {
+			pr_err("%s: Invalid authkey length (%d)\n",
+						__func__, req->authklen);
+			goto sha_error;
+		}
+	}
+
+	if (req->alg == QCEDEV_ALG_AES_CMAC) {
+		if ((req->authklen != QCEDEV_AES_KEY_128) &&
+					(req->authklen != QCEDEV_AES_KEY_256)) {
+			pr_err("%s: unsupported key length\n", __func__);
+			goto sha_error;
+		}
+	}
+
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (req->data[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req buf length\n",
+				__func__);
+			goto sha_error;
+		}
+		total += req->data[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto sha_error;
+	}
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
+static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req,
+				       struct qcedev_control *podev)
+{
+	if (req->encklen == 0)
+		return -EINVAL;
+
+	/* AES-192 is not a valid option for OFFLOAD use case */
+	if ((req->encklen != QCEDEV_AES_KEY_128) &&
+			(req->encklen != QCEDEV_AES_KEY_256)) {
+		pr_err("%s: unsupported key size %d\n",
+					__func__, req->encklen);
+		goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
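+/*
+ * Validate a userspace offload cipher request: AES only, 128/256-bit key
+ * sizes, IV/byte-offset rules, and that the source and destination buffer
+ * lists add up to data_len without integer overflow.
+ */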
+static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req,
+					  struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	int i = 0;
+
+	if ((req->entries == 0) || (req->data_len == 0) ||
+		(req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+
+	if ((req->alg != QCEDEV_ALG_AES) ||
+		(req->mode > QCEDEV_AES_MODE_CTR)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+					(uint32_t)req->alg);
+		goto error;
+	}
+
+	if (qcedev_check_offload_cipher_key(req, podev))
+		goto error;
+
+	if (req->block_offset >= AES_CE_BLOCK_SIZE)
+		goto error;
+
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+						__func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s:Int overflow on total src len\n",
+						__func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+				__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__,
+						req->encklen);
+		goto error;
+	}
+
+	/* Check for sum of all dst length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+					__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Int overflow on total req dst vbuf len\n",
+					__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+					__func__, i, total, req->data_len);
+		goto error;
+	}
+
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+					__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Int overflow on total req src vbuf len\n",
+					__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+				__func__, total, req->data_len);
+		goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+}
+
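+/*
+ * Main ioctl dispatcher: copy the request in from userspace, validate it,
+ * run the cipher/hash operation and copy the updated request back out.
+ */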
+long qcedev_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL);
+	if (!qcedev_areq)
+		return -ENOMEM;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	qcedev_areq->handle = handle;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %pK\n",
+			__func__, podev);
+		err = -ENOENT;
+		goto exit_free_qcedev_areq;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
+		err = -ENOTTY;
+		goto exit_free_qcedev_areq;
+	}
+
+	init_completion(&qcedev_areq->complete);
+	pstat = &_qcedev_stat;
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (copy_from_user(&qcedev_areq->cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req,
+				podev)) {
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+
+		err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle);
+		if (err)
+			goto exit_free_qcedev_areq;
+		if (copy_to_user((void __user *)arg,
+					&qcedev_areq->cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		break;
+
+	case QCEDEV_IOCTL_OFFLOAD_OP_REQ:
+		if (copy_from_user(&qcedev_areq->offload_cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_offload_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER;
+		if (qcedev_check_offload_cipher_params(
+				&qcedev_areq->offload_cipher_op_req, podev)) {
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR;
+		err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle);
+		if (err)
+			goto exit_free_qcedev_areq;
+
+		if (copy_to_user((void __user *)arg,
+				&qcedev_areq->offload_cipher_op_req,
+				sizeof(struct qcedev_offload_cipher_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq->sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			goto exit_free_qcedev_areq;
+		}
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		handle->sha_ctxt.init_done = true;
+		}
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac) {
+			err = -ENOTTY;
+			goto exit_free_qcedev_areq;
+		}
+		fallthrough;
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq->sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src);
+			if (err) {
+				mutex_unlock(&hash_access_lock);
+				goto exit_free_qcedev_areq;
+			}
+		} else {
+			if (!handle->sha_ctxt.init_done) {
+				pr_err("%s Init was not called\n", __func__);
+				mutex_unlock(&hash_access_lock);
+				err = -EINVAL;
+				goto exit_free_qcedev_areq;
+			}
+			err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
+			if (err) {
+				mutex_unlock(&hash_access_lock);
+				goto exit_free_qcedev_areq;
+			}
+		}
+
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		memcpy(&qcedev_areq->sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		}
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (!handle->sha_ctxt.init_done) {
+			pr_err("%s Init was not called\n", __func__);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		if (copy_from_user(&qcedev_areq->sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(qcedev_areq, handle);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			goto exit_free_qcedev_areq;
+		}
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq->sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		handle->sha_ctxt.init_done = false;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq->sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		mutex_lock(&hash_access_lock);
+		if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) {
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(qcedev_areq, handle, &sg_src);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			goto exit_free_qcedev_areq;
+		}
+		err = qcedev_hash_update(qcedev_areq, handle, &sg_src);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			goto exit_free_qcedev_areq;
+		}
+		err = qcedev_hash_final(qcedev_areq, handle);
+		if (err) {
+			mutex_unlock(&hash_access_lock);
+			goto exit_free_qcedev_areq;
+		}
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			mutex_unlock(&hash_access_lock);
+			err = -EINVAL;
+			goto exit_free_qcedev_areq;
+		}
+		qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq->sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		mutex_unlock(&hash_access_lock);
+		if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req,
+					sizeof(struct qcedev_sha_op_req))) {
+			err = -EFAULT;
+			goto exit_free_qcedev_areq;
+		}
+		}
+		break;
+
+	case QCEDEV_IOCTL_MAP_BUF_REQ:
+		{
+			unsigned long long vaddr = 0;
+			struct qcedev_map_buf_req map_buf = { {0} };
+			int i = 0;
+
+			if (copy_from_user(&map_buf,
+					(void __user *)arg, sizeof(map_buf))) {
+				err = -EFAULT;
+				goto exit_free_qcedev_areq;
+			}
+
+			if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
+				pr_err("%s: err: num_fds = %d exceeds max value\n",
+							__func__, map_buf.num_fds);
+				err = -EINVAL;
+				goto exit_free_qcedev_areq;
+			}
+
+			for (i = 0; i < map_buf.num_fds; i++) {
+				err = qcedev_check_and_map_buffer(handle,
+						map_buf.fd[i],
+						map_buf.fd_offset[i],
+						map_buf.fd_size[i],
+						&vaddr);
+				if (err) {
+					pr_err(
+						"%s: err: failed to map fd(%d) - %d\n",
+						__func__, map_buf.fd[i], err);
+					goto exit_free_qcedev_areq;
+				}
+				map_buf.buf_vaddr[i] = vaddr;
+				pr_info("%s: info: vaddr = %llx, fd = %d\n",
+					__func__, vaddr, map_buf.fd[i]);
+			}
+
+			if (copy_to_user((void __user *)arg, &map_buf,
+					sizeof(map_buf))) {
+				err = -EFAULT;
+				goto exit_free_qcedev_areq;
+			}
+			break;
+		}
+
+	case QCEDEV_IOCTL_UNMAP_BUF_REQ:
+		{
+			struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
+			int i = 0;
+
+			if (copy_from_user(&unmap_buf,
+				(void __user *)arg, sizeof(unmap_buf))) {
+				err = -EFAULT;
+				goto exit_free_qcedev_areq;
+			}
+			if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
+				pr_err("%s: err: num_fds = %d exceeds max value\n",
+							__func__, unmap_buf.num_fds);
+				err = -EINVAL;
+				goto exit_free_qcedev_areq;
+			}
+
+			for (i = 0; i < unmap_buf.num_fds; i++) {
+				err = qcedev_check_and_unmap_buffer(handle,
+						unmap_buf.fd[i]);
+				if (err) {
+					pr_err(
+						"%s: err: failed to unmap fd(%d) - %d\n",
+						 __func__,
+						unmap_buf.fd[i], err);
+					goto exit_free_qcedev_areq;
+				}
+			}
+			break;
+		}
+
+	default:
+		err = -ENOTTY;
+		goto exit_free_qcedev_areq;
+	}
+
+exit_free_qcedev_areq:
+	kfree(qcedev_areq);
+	return err;
+}
+
+static int qcedev_probe_device(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	podev = &qce_dev[0];
+
+	rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	driver_class = class_create(QCEDEV_DEV);
+#else
+	driver_class = class_create(THIS_MODULE, QCEDEV_DEV);
+#endif
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL,
+			QCEDEV_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&podev->cdev, &qcedev_fops);
+	podev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+	podev->minor = 0;
+
+	podev->high_bw_req_count = 0;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	INIT_LIST_HEAD(&podev->context_banks);
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	podev->icc_path = of_icc_get(&pdev->dev, "data_path");
+	if (IS_ERR(podev->icc_path)) {
+		rc = PTR_ERR(podev->icc_path);
+		pr_err("%s Failed to get icc path with error %d\n",
+			__func__, rc);
+		goto exit_del_cdev;
+	}
+
+	/*
+	 * HLOS crypto vote values from DTSI. If no values specified, use
+	 * nominal values.
+	 */
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,icc_avg_bw",
+				&podev->icc_avg_bw)) {
+		pr_warn("%s: No icc avg BW set, using default\n", __func__);
+		podev->icc_avg_bw = CRYPTO_AVG_BW;
+	}
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,icc_peak_bw",
+				&podev->icc_peak_bw)) {
+		pr_warn("%s: No icc peak BW set, using default\n", __func__);
+		podev->icc_peak_bw = CRYPTO_PEAK_BW;
+	}
+
+	rc = icc_set_bw(podev->icc_path, podev->icc_avg_bw,
+				podev->icc_peak_bw);
+	if (rc) {
+		pr_err("%s Unable to set high bandwidth\n", __func__);
+		goto exit_unregister_bus_scale;
+	}
+
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		rc = -ENODEV;
+		goto exit_scale_busbandwidth;
+	}
+	podev->qce = handle;
+
+	rc = qce_set_irqs(podev->qce, false);
+	if (rc) {
+		pr_err("%s: could not disable bam irqs, ret = %d\n",
+				__func__, rc);
+		goto exit_scale_busbandwidth;
+	}
+
+	rc = icc_set_bw(podev->icc_path, 0, 0);
+	if (rc) {
+		pr_err("%s Unable to set to low bandwidth\n", __func__);
+		goto exit_qce_close;
+	}
+
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	qce_hw_support(podev->qce, &podev->ce_support);
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support =
+						podev->ce_support.hw_key;
+		podev->platform_support.sha_hmac = 1;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+
+	podev->mem_client = qcedev_mem_new_client(MEM_ION);
+	if (!podev->mem_client) {
+		pr_err("%s: err: qcedev_mem_new_client failed\n", __func__);
+		rc = -ENOMEM;
+		goto exit_qce_close;
+	}
+
+	rc = of_platform_populate(pdev->dev.of_node, qcedev_match,
+			NULL, &pdev->dev);
+	if (rc) {
+		pr_err("%s: err: of_platform_populate failed: %d\n",
+			__func__, rc);
+		goto exit_mem_new_client;
+	}
+
+	return 0;
+
+exit_mem_new_client:
+	if (podev->mem_client)
+		qcedev_mem_delete_client(podev->mem_client);
+	podev->mem_client = NULL;
+
+exit_qce_close:
+	if (handle)
+		qce_close(handle);
+exit_scale_busbandwidth:
+	icc_set_bw(podev->icc_path, 0, 0);
+exit_unregister_bus_scale:
+	if (podev->icc_path)
+		icc_put(podev->icc_path);
+exit_del_cdev:
+	cdev_del(&podev->cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qcedev_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qcedev_device_no, 1);
+
+	podev->icc_path = NULL;
+	platform_set_drvdata(pdev, NULL);
+	podev->pdev = NULL;
+	podev->qce = NULL;
+
+	return rc;
+}
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev"))
+		return qcedev_probe_device(pdev);
+	else if (of_device_is_compatible(pdev->dev.of_node,
+		"qcom,qcedev,context-bank"))
+		return qcedev_parse_context_bank(pdev);
+
+	return -EINVAL;
+}
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+
+	qcedev_ce_high_bw_req(podev, true);
+	if (podev->qce)
+		qce_close(podev->qce);
+	qcedev_ce_high_bw_req(podev, false);
+
+	if (podev->icc_path)
+		icc_put(podev->icc_path);
+	tasklet_kill(&podev->done_tasklet);
+
+	cdev_del(&podev->cdev);
+
+	device_destroy(driver_class, qcedev_device_no);
+
+	class_destroy(driver_class);
+
+	unregister_chrdev_region(qcedev_device_no, 1);
+	return 0;
+}
+
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = qce_set_irqs(podev->qce, false);
+		if (ret) {
+			pr_err("%s: could not disable bam irqs, ret = %d\n",
+					__func__, ret);
+			goto suspend_exit;
+		}
+		ret = qcedev_control_clocks(podev, false);
+		if (ret)
+			goto suspend_exit;
+	}
+
+suspend_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = qcedev_control_clocks(podev, true);
+		if (ret)
+			goto resume_exit;
+		ret = qce_set_irqs(podev->qce, true);
+		if (ret) {
+			pr_err("%s: could not enable bam irqs, ret = %d\n",
+					__func__, ret);
+			qcedev_control_clocks(podev, false);
+		}
+	}
+
+resume_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.suspend = qcedev_suspend,
+	.resume = qcedev_resume,
+	.driver = {
+		.name = "qce",
+		.of_match_table = qcedev_match,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail          : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         simple_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcedev = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+			&_debug_qcedev, &_debug_stats_ops);
+	if (IS_ERR(dent)) {
+		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
+				PTR_ERR(dent));
+		rc = PTR_ERR(dent);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	_qcedev_debug_init();
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DEV Crypto driver");
+MODULE_IMPORT_NS(DMA_BUF);
+module_init(qcedev_init);
+module_exit(qcedev_exit);
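
For orientation, below is a minimal userspace sketch of the QCEDEV_IOCTL_MAP_BUF_REQ path handled by qcedev_ioctl() above. It is illustrative only and not part of this commit: the /dev/qce node name is inferred from QCEDEV_DEV and device_create() in qcedev_probe_device(), and the <linux/qcedev.h> include path is assumed from the driver's own "linux/qcedev.h" UAPI header.

/* Hypothetical userspace usage sketch; not part of this commit. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qcedev.h>	/* assumed install location of the UAPI header */

int qce_map_one_dmabuf(int dmabuf_fd, unsigned int size)
{
	struct qcedev_map_buf_req req;
	int fd = open("/dev/qce", O_RDWR);	/* node name taken from QCEDEV_DEV */

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.num_fds = 1;
	req.fd[0] = dmabuf_fd;
	req.fd_offset[0] = 0;
	req.fd_size[0] = size;

	/* On success the driver fills buf_vaddr[0] with the SMMU iova. */
	if (ioctl(fd, QCEDEV_IOCTL_MAP_BUF_REQ, &req) < 0) {
		close(fd);
		return -1;
	}
	printf("fd %d mapped at 0x%llx\n", dmabuf_fd,
		(unsigned long long)req.buf_vaddr[0]);

	close(fd);
	return 0;
}

The matching teardown goes through QCEDEV_IOCTL_UNMAP_BUF_REQ with the same fd in a struct qcedev_unmap_buf_req.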

+ 443 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcedev_smmu.c

@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI (Qualcomm Technologies, Inc.) CE device driver.
+ *
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/qcom-dma-mapping.h>
+#include <linux/list.h>
+#include "linux/qcedev.h"
+#include "qcedevi.h"
+#include "qcedev_smmu.h"
+#include "soc/qcom/secure_buffer.h"
+#include <linux/mem-buf.h>
+
+static int qcedev_setup_context_bank(struct context_bank_info *cb,
+				struct device *dev)
+{
+	if (!dev || !cb) {
+		pr_err("%s err: invalid input params\n", __func__);
+		return -EINVAL;
+	}
+	cb->dev = dev;
+
+	if (!dev->dma_parms) {
+		dev->dma_parms = devm_kzalloc(dev,
+				sizeof(*dev->dma_parms), GFP_KERNEL);
+		if (!dev->dma_parms)
+			return -ENOMEM;
+	}
+	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
+
+	return 0;
+}
+
+int qcedev_parse_context_bank(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+	struct context_bank_info *cb = NULL;
+	struct device_node *np = NULL;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s err: invalid platform devices\n", __func__);
+		return -EINVAL;
+	}
+	if (!pdev->dev.parent) {
+		pr_err("%s err: failed to find a parent for %s\n",
+			__func__, dev_name(&pdev->dev));
+		return -EINVAL;
+	}
+
+	podev = dev_get_drvdata(pdev->dev.parent);
+	np = pdev->dev.of_node;
+	cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
+	if (!cb) {
+		pr_err("%s ERROR = Failed to allocate cb\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&cb->list);
+	list_add_tail(&cb->list, &podev->context_banks);
+
+	rc = of_property_read_string(np, "label", &cb->name);
+	if (rc)
+		pr_debug("%s ERROR = Unable to read label\n", __func__);
+
+	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
+
+	rc = qcedev_setup_context_bank(cb, &pdev->dev);
+	if (rc) {
+		pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
+		goto err_setup_cb;
+	}
+
+	return 0;
+
+err_setup_cb:
+	list_del(&cb->list);
+	devm_kfree(&pdev->dev, cb);
+	return rc;
+}
+
+struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
+{
+	struct qcedev_mem_client *mem_client = NULL;
+
+	if (mtype != MEM_ION) {
+		pr_err("%s: err: Mem type not supported\n", __func__);
+		goto err;
+	}
+
+	mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
+	if (!mem_client)
+		goto err;
+	mem_client->mtype = mtype;
+
+	return mem_client;
+err:
+	return NULL;
+}
+
+void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
+{
+	kfree(mem_client);
+}
+
+static bool is_iommu_present(struct qcedev_handle *qce_hndl)
+{
+	return !list_empty(&qce_hndl->cntl->context_banks);
+}
+
+static struct context_bank_info *get_context_bank(
+		struct qcedev_handle *qce_hndl, bool is_secure)
+{
+	struct qcedev_control *podev = qce_hndl->cntl;
+	struct context_bank_info *cb = NULL, *match = NULL;
+
+	list_for_each_entry(cb, &podev->context_banks, list) {
+		if (cb->is_secure == is_secure) {
+			match = cb;
+			break;
+		}
+	}
+	return match;
+}
+
+static int ion_map_buffer(struct qcedev_handle *qce_hndl,
+		struct qcedev_mem_client *mem_client, int fd,
+		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
+{
+	int rc = 0;
+	struct dma_buf *buf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct context_bank_info *cb = NULL;
+
+	buf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(buf))
+		return -EINVAL;
+
+	if (is_iommu_present(qce_hndl)) {
+		cb = get_context_bank(qce_hndl, !mem_buf_dma_buf_exclusive_owner(buf));
+		if (!cb) {
+			pr_err("%s: err: failed to get context bank info\n",
+				__func__);
+			rc = -EIO;
+			goto map_err;
+		}
+
+		/* Prepare a dma buf for dma on the given device */
+		attach = dma_buf_attach(buf, cb->dev);
+		if (IS_ERR_OR_NULL(attach)) {
+			rc = PTR_ERR(attach) ?: -ENOMEM;
+			pr_err("%s: err: failed to attach dmabuf\n", __func__);
+			goto map_err;
+		}
+
+		/* Get the scatterlist for the given attachment */
+		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
+		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(table)) {
+			rc = PTR_ERR(table) ?: -ENOMEM;
+			pr_err("%s: err: failed to map table\n", __func__);
+			goto map_table_err;
+		}
+
+		if (table->sgl) {
+			binfo->ion_buf.iova = sg_dma_address(table->sgl);
+			binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
+			if (binfo->ion_buf.mapped_buf_size < fd_size) {
+				pr_err("%s: err: mapping failed, size mismatch\n",
+						__func__);
+				rc = -ENOMEM;
+				goto map_sg_err;
+			}
+		} else {
+			pr_err("%s: err: sg list is NULL\n", __func__);
+			rc = -ENOMEM;
+			goto map_sg_err;
+		}
+
+		binfo->ion_buf.mapping_info.dev = cb->dev;
+		binfo->ion_buf.mapping_info.mapping = cb->mapping;
+		binfo->ion_buf.mapping_info.table = table;
+		binfo->ion_buf.mapping_info.attach = attach;
+		binfo->ion_buf.mapping_info.buf = buf;
+		binfo->ion_buf.ion_fd = fd;
+	} else {
+		pr_err("%s: err: smmu not enabled\n", __func__);
+		rc = -EIO;
+		goto map_err;
+	}
+
+	return 0;
+
+map_sg_err:
+	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
+map_table_err:
+	dma_buf_detach(buf, attach);
+map_err:
+	dma_buf_put(buf);
+	return rc;
+}
+
+static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
+		struct qcedev_reg_buf_info *binfo)
+{
+	struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;
+
+	if (is_iommu_present(qce_hndl)) {
+		dma_buf_unmap_attachment(mapping_info->attach,
+			mapping_info->table, DMA_BIDIRECTIONAL);
+		dma_buf_detach(mapping_info->buf, mapping_info->attach);
+		dma_buf_put(mapping_info->buf);
+
+	}
+	return 0;
+}
+
+static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
+		struct qcedev_mem_client *mem_client, int fd,
+		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
+{
+	int rc = -1;
+
+	switch (mem_client->mtype) {
+	case MEM_ION:
+		rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
+		break;
+	default:
+		pr_err("%s: err: Mem type not supported\n", __func__);
+		break;
+	}
+
+	if (rc)
+		pr_err("%s: err: failed to map buffer\n", __func__);
+
+	return rc;
+}
+
+static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
+		struct qcedev_mem_client *mem_client,
+		struct qcedev_reg_buf_info *binfo)
+{
+	int rc = -1;
+
+	switch (mem_client->mtype) {
+	case MEM_ION:
+		rc = ion_unmap_buffer(qce_hndl, binfo);
+		break;
+	default:
+		pr_err("%s: err: Mem type not supported\n", __func__);
+		break;
+	}
+
+	if (rc)
+		pr_err("%s: err: failed to unmap buffer\n", __func__);
+
+	return rc;
+}
+
+int qcedev_check_and_map_buffer(void *handle,
+		int fd, unsigned int offset, unsigned int fd_size,
+		unsigned long long *vaddr)
+{
+	bool found = false;
+	struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
+	struct qcedev_mem_client *mem_client = NULL;
+	struct qcedev_handle *qce_hndl = handle;
+	int rc = 0;
+	unsigned long mapped_size = 0;
+
+	if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
+		pr_err("%s: err: invalid input arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
+		pr_err("%s: err: invalid qcedev handle\n", __func__);
+		return -EINVAL;
+	}
+	mem_client = qce_hndl->cntl->mem_client;
+
+	if (mem_client->mtype != MEM_ION)
+		return -EPERM;
+
+	/* Check if the buffer fd is already mapped */
+	mutex_lock(&qce_hndl->registeredbufs.lock);
+	list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
+		if (temp->ion_buf.ion_fd == fd) {
+			found = true;
+			*vaddr = temp->ion_buf.iova;
+			mapped_size = temp->ion_buf.mapped_buf_size;
+			atomic_inc(&temp->ref_count);
+			break;
+		}
+	}
+	mutex_unlock(&qce_hndl->registeredbufs.lock);
+
+	/* If buffer fd is not mapped then create a fresh mapping */
+	if (!found) {
+		pr_debug("%s: info: ion fd not registered with driver\n",
+			__func__);
+		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
+		if (!binfo) {
+			pr_err("%s: err: failed to allocate binfo\n",
+				__func__);
+			rc = -ENOMEM;
+			goto error;
+		}
+		rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
+							fd_size, binfo);
+		if (rc) {
+			pr_err("%s: err: failed to map fd (%d) error = %d\n",
+				__func__, fd, rc);
+			goto error;
+		}
+
+		*vaddr = binfo->ion_buf.iova;
+		mapped_size = binfo->ion_buf.mapped_buf_size;
+		atomic_inc(&binfo->ref_count);
+
+		/* Add buffer mapping information to regd buffer list */
+		mutex_lock(&qce_hndl->registeredbufs.lock);
+		list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
+		mutex_unlock(&qce_hndl->registeredbufs.lock);
+	}
+
+	/* Make sure the offset is within the mapped range */
+	if (offset >= mapped_size) {
+		pr_err(
+			"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
+			__func__, offset, mapped_size, fd);
+		rc = -ERANGE;
+		goto unmap;
+	}
+
+	/* return the mapped virtual address adjusted by offset */
+	*vaddr += offset;
+
+	return 0;
+
+unmap:
+	if (!found) {
+		qcedev_unmap_buffer(handle, mem_client, binfo);
+		mutex_lock(&qce_hndl->registeredbufs.lock);
+		list_del(&binfo->list);
+		mutex_unlock(&qce_hndl->registeredbufs.lock);
+	}
+
+error:
+	kfree(binfo);
+	return rc;
+}
+
+int qcedev_check_and_unmap_buffer(void *handle, int fd)
+{
+	struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
+	struct qcedev_mem_client *mem_client = NULL;
+	struct qcedev_handle *qce_hndl = handle;
+	bool found = false;
+
+	if (!handle || fd < 0) {
+		pr_err("%s: err: invalid input arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
+		pr_err("%s: err: invalid qcedev handle\n", __func__);
+		return -EINVAL;
+	}
+	mem_client = qce_hndl->cntl->mem_client;
+
+	if (mem_client->mtype != MEM_ION)
+		return -EPERM;
+
+	/* Check if the buffer fd is mapped and present in the regd list. */
+	mutex_lock(&qce_hndl->registeredbufs.lock);
+	list_for_each_entry_safe(binfo, dummy,
+		&qce_hndl->registeredbufs.list, list) {
+		if (binfo->ion_buf.ion_fd == fd) {
+			found = true;
+			atomic_dec(&binfo->ref_count);
+
+			/* Unmap only if there are no more references */
+			if (atomic_read(&binfo->ref_count) == 0) {
+				qcedev_unmap_buffer(qce_hndl,
+					mem_client, binfo);
+				list_del(&binfo->list);
+				kfree(binfo);
+			}
+			break;
+		}
+	}
+	mutex_unlock(&qce_hndl->registeredbufs.lock);
+
+	if (!found) {
+		pr_err("%s: err: calling unmap on unknown fd %d\n",
+			__func__, fd);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int qcedev_unmap_all_buffers(void *handle)
+{
+	struct qcedev_reg_buf_info *binfo = NULL;
+	struct qcedev_mem_client *mem_client = NULL;
+	struct qcedev_handle *qce_hndl = handle;
+	struct list_head *pos;
+
+	if (!handle) {
+		pr_err("%s: err: invalid input arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
+		pr_err("%s: err: invalid qcedev handle\n", __func__);
+		return -EINVAL;
+	}
+	mem_client = qce_hndl->cntl->mem_client;
+
+	if (mem_client->mtype != MEM_ION)
+		return -EPERM;
+
+	mutex_lock(&qce_hndl->registeredbufs.lock);
+	while (!list_empty(&qce_hndl->registeredbufs.list)) {
+		pos = qce_hndl->registeredbufs.list.next;
+		binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
+		if (binfo)
+			qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
+		list_del(pos);
+		kfree(binfo);
+	}
+	mutex_unlock(&qce_hndl->registeredbufs.lock);
+
+	return 0;
+}
+
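To make the reference-counting contract of the two entry points above concrete, here is a small illustrative sketch (not part of this commit); "handle" is assumed to be a valid struct qcedev_handle pointer and "fd" a dma-buf file descriptor already owned by the client.

/* Illustrative sketch only; not part of this commit. */
static int example_map_unmap_refcount(void *handle, int fd, unsigned int size)
{
	unsigned long long iova1 = 0, iova2 = 0;
	int rc;

	/* First call attaches and maps the dma-buf and takes one reference. */
	rc = qcedev_check_and_map_buffer(handle, fd, 0, size, &iova1);
	if (rc)
		return rc;

	/* Mapping the same fd again only bumps ref_count; the same iova comes back. */
	rc = qcedev_check_and_map_buffer(handle, fd, 0, size, &iova2);
	if (rc == 0 && iova1 != iova2)
		pr_warn("unexpected iova mismatch\n");

	/* Each unmap drops one reference; the SMMU mapping and the dma-buf are
	 * released only when the last reference goes away.
	 */
	if (rc == 0)
		qcedev_check_and_unmap_buffer(handle, fd);
	qcedev_check_and_unmap_buffer(handle, fd);

	return rc;
}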

+ 81 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcedev_smmu.h

@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QTI (Qualcomm Technologies, Inc.) CE device driver.
+ *
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DRIVERS_CRYPTO_PARSE_H_
+#define _DRIVERS_CRYPTO_PARSE_H_
+
+#include <linux/dma-buf.h>
+#include <linux/dma-direction.h>
+#include <linux/iommu.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/msm_ion.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct context_bank_info {
+	struct list_head list;
+	const char *name;
+	u32 buffer_type;
+	u32 start_addr;
+	u32 size;
+	bool is_secure;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+};
+
+enum qcedev_mem_type {
+	MEM_ION,
+};
+
+struct qcedev_mem_client {
+	enum qcedev_mem_type mtype;
+};
+
+struct dma_mapping_info {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	struct sg_table *table;
+	struct dma_buf_attachment *attach;
+	struct dma_buf *buf;
+};
+
+struct qcedev_ion_buf_info {
+	struct dma_mapping_info mapping_info;
+	dma_addr_t iova;
+	unsigned long mapped_buf_size;
+	int ion_fd;
+};
+
+struct qcedev_reg_buf_info {
+	struct list_head list;
+	union {
+		struct qcedev_ion_buf_info ion_buf;
+	};
+	atomic_t ref_count;
+};
+
+struct qcedev_buffer_list {
+	struct list_head list;
+	struct mutex lock;
+};
+
+int qcedev_parse_context_bank(struct platform_device *pdev);
+struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype);
+void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client);
+int qcedev_check_and_map_buffer(void *qce_hndl,
+		int fd, unsigned int offset, unsigned int fd_size,
+		unsigned long long *vaddr);
+int qcedev_check_and_unmap_buffer(void *handle, int fd);
+int qcedev_unmap_all_buffers(void *handle);
+
+extern struct qcedev_reg_buf_info *global_binfo_in;
+extern struct qcedev_reg_buf_info *global_binfo_out;
+extern struct qcedev_reg_buf_info *global_binfo_res;
+#endif
+
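A minimal sketch (illustrative, not code from this commit) of preparing the qcedev_buffer_list declared above; it mirrors how struct qcedev_handle::registeredbufs in qcedevi.h has to be set up before the map/unmap helpers walk the list under its lock.

/* Illustrative sketch only; not part of this commit. */
#include <linux/list.h>
#include <linux/mutex.h>
#include "qcedev_smmu.h"

static void example_init_registered_bufs(struct qcedev_buffer_list *bufs)
{
	/* qcedev_check_and_map_buffer() iterates bufs->list under bufs->lock,
	 * so both must be initialized before the first map request arrives.
	 */
	INIT_LIST_HEAD(&bufs->list);
	mutex_init(&bufs->lock);
}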

+ 136 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcedevi.h

@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QTI crypto Driver
+ *
+ * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <crypto/hash.h>
+#include "qcom_crypto_device.h"
+#include "fips_status.h"
+#include "qce.h"
+#include "qcedev_smmu.h"
+
+#define CACHE_LINE_SIZE 64
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER = 0,
+	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER = 2,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+	struct skcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	void *cookie;
+};
+
+struct	qcedev_sha_ctxt {
+	uint32_t	auth_data[4];
+	uint8_t	digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t	diglen;
+	uint8_t	trailing_buf[64];
+	uint32_t	trailing_buf_len;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+	bool		init_done;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+		struct qcedev_offload_cipher_op_req	offload_cipher_op_req;
+	};
+
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_handle			*handle;
+	int					err;
+	wait_queue_head_t			wait_q;
+	uint16_t				state;
+	bool					timed_out;
+};
+
+/**********************************************************************
+ * Register ourselves as a char device to be able to access the dev driver
+ * from userspace.
+ */
+
+#define QCEDEV_DEV	"qce"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	uint32_t ce_lock_count;
+	uint32_t high_bw_req_count;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* replaced msm_bus with interconnect path */
+	struct icc_path *icc_path;
+
+	/* average and peak bw values for interconnect */
+	uint32_t icc_avg_bw;
+	uint32_t icc_peak_bw;
+
+	/* char device */
+	struct cdev cdev;
+
+	int minor;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned int magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+	struct list_head context_banks;
+	struct qcedev_mem_client *mem_client;
+};
+
+struct qcedev_handle {
+	/* qcedev control handle */
+	struct qcedev_control *cntl;
+	/* qce internal sha context*/
+	struct qcedev_sha_ctxt sha_ctxt;
+	/* qcedev mapped buffer list */
+	struct qcedev_buffer_list registeredbufs;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret);
+
+#endif  /* __CRYPTO_MSM_QCEDEVI_H */

+ 19 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcom_crypto_device.h

@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+#include <linux/types.h>
+
+struct msm_ce_hw_support {
+	uint32_t ce_shared;
+	uint32_t shared_ce_resource;
+	uint32_t hw_key_support;
+	uint32_t sha_hmac;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */

+ 5546 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcrypto.c

@@ -0,0 +1,5546 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI Crypto driver
+ *
+ * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/interconnect.h>
+#include <linux/hardirq.h>
+#include <linux/version.h>
+#include "qcrypto.h"
+#include "qcom_crypto_device.h"
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include "fips_status.h"
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 4096
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE  0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+/* Status of response workq */
+enum resp_workq_sts {
+	NOT_SCHEDULED  = 0,
+	IS_SCHEDULED   = 1,
+	SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+	STOPPED     = 0,
+	IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+	BUS_NO_BANDWIDTH = 0,
+	BUS_HAS_BANDWIDTH,
+	BUS_BANDWIDTH_RELEASING,
+	BUS_BANDWIDTH_ALLOCATING,
+	BUS_SUSPENDED,
+	BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+	u64 aead_sha1_aes_enc;
+	u64 aead_sha1_aes_dec;
+	u64 aead_sha1_des_enc;
+	u64 aead_sha1_des_dec;
+	u64 aead_sha1_3des_enc;
+	u64 aead_sha1_3des_dec;
+	u64 aead_sha256_aes_enc;
+	u64 aead_sha256_aes_dec;
+	u64 aead_sha256_des_enc;
+	u64 aead_sha256_des_dec;
+	u64 aead_sha256_3des_enc;
+	u64 aead_sha256_3des_dec;
+	u64 aead_ccm_aes_enc;
+	u64 aead_ccm_aes_dec;
+	u64 aead_rfc4309_ccm_aes_enc;
+	u64 aead_rfc4309_ccm_aes_dec;
+	u64 aead_op_success;
+	u64 aead_op_fail;
+	u64 aead_bad_msg;
+	u64 sk_cipher_aes_enc;
+	u64 sk_cipher_aes_dec;
+	u64 sk_cipher_des_enc;
+	u64 sk_cipher_des_dec;
+	u64 sk_cipher_3des_enc;
+	u64 sk_cipher_3des_dec;
+	u64 sk_cipher_op_success;
+	u64 sk_cipher_op_fail;
+	u64 sha1_digest;
+	u64 sha256_digest;
+	u64 sha1_hmac_digest;
+	u64 sha256_hmac_digest;
+	u64 ahash_op_success;
+	u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+	unsigned int index;
+	bool in_use;
+	struct crypto_engine *pce;
+	struct crypto_async_request *req;
+	struct qcrypto_resp_ctx *arsp;
+	int res; /* execution result */
+};
+
+struct crypto_engine {
+	struct list_head elist;
+	void *qce; /* qce handle */
+	struct platform_device *pdev; /* platform device */
+	struct crypto_priv *pcp;
+	struct icc_path *icc_path;
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that have this engine assigned
+					 * waiting to be executed
+					 */
+	u64 total_req;
+	u64 err_req;
+	u32 unit;
+	u32 ce_device;
+	u32 ce_hw_instance;
+	unsigned int signature;
+
+	enum qcrypto_bus_state bw_state;
+	bool   high_bw_req;
+	struct timer_list bw_reaper_timer;
+	struct work_struct bw_reaper_ws;
+	struct work_struct bw_allocate_ws;
+
+	/* engine execution sequence number */
+	u32    active_seq;
+	/* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+	u32    last_active_seq;
+
+	bool   check_flag;
+	/*Added to support multi-requests*/
+	unsigned int max_req;
+	struct   qcrypto_req_control *preq_pool;
+	atomic_t req_count;
+	bool issue_req;		/* a request is being issued to qce */
+	bool first_engine;	/* this engine is the first engine or not */
+	unsigned int irq_cpu;	/* the cpu running the irq of this engine */
+	unsigned int max_req_used; /* debug stats */
+};
+
+#define MAX_SMP_CPU    8
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* the lock protects crypto queue and req */
+	spinlock_t lock;
+
+	/* list of  registered algorithms */
+	struct list_head alg_list;
+
+	/* current active request */
+	struct crypto_async_request *req;
+
+	struct work_struct unlock_ce_ws;
+	struct list_head engine_list; /* list of  qcrypto engines */
+	int32_t total_units;   /* total units of engines */
+	struct mutex engine_lock;
+
+	struct crypto_engine *next_engine; /* next assign engine */
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that waiting for an available
+					 * engine.
+					 */
+	struct llist_head ordered_resp_list;	/* Queue to maintain
+						 * responses in sequence.
+						 */
+	atomic_t resp_cnt;
+	struct workqueue_struct *resp_wq;
+	struct work_struct resp_work;	/*
+					 * Workq to send responses
+					 * in sequence.
+					 */
+	enum resp_workq_sts sched_resp_workq_status;
+	enum req_processing_sts ce_req_proc_sts;
+	int cpu_getting_irqs_frm_first_ce;
+	struct crypto_engine *first_engine;
+	struct crypto_engine *scheduled_eng; /* last engine scheduled */
+
+	/* debug stats */
+	unsigned int no_avail;
+	unsigned int resp_stop;
+	unsigned int resp_start;
+	unsigned int max_qlen;
+	unsigned int queue_work_eng3;
+	unsigned int queue_work_not_eng3;
+	unsigned int queue_work_not_eng3_nz;
+	unsigned int max_resp_qlen;
+	unsigned int max_reorder_cnt;
+	unsigned int cpu_req[MAX_SMP_CPU+1];
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+						struct crypto_engine *pce)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+	unsigned int req_count;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (!xchg(&pqcrypto_req_control->in_use, true)) {
+			req_count = atomic_inc_return(&pce->req_count);
+			if (req_count > pce->max_req_used)
+				pce->max_req_used = req_count;
+			return pqcrypto_req_control;
+		}
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+					struct qcrypto_req_control *preq)
+{
+	/* do this before free req */
+	preq->req = NULL;
+	preq->arsp = NULL;
+	/* free req */
+	if (!xchg(&preq->in_use, false))
+		pr_warn("request info %pK already freed\n", preq);
+	else
+		atomic_dec(&pce->req_count);
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+					struct crypto_engine *pce,
+					struct crypto_async_request *areq)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (pqcrypto_req_control->req == areq)
+			return pqcrypto_req_control;
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	int i;
+
+	pce->preq_pool = pqcrypto_req_control;
+	atomic_set(&pce->req_count, 0);
+	for (i = 0; i < pce->max_req; i++) {
+		pqcrypto_req_control->index = i;
+		pqcrypto_req_control->in_use = false;
+		pqcrypto_req_control->pce = pce;
+		pqcrypto_req_control++;
+	}
+}
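As a usage note (illustrative only, not code from this commit), a typical life cycle of one slot from the per-engine pool initialized above looks like the sketch below; "pce" is assumed to be a fully probed engine and "areq" a pending crypto_async_request.

/* Illustrative sketch only; not part of this commit. */
static int example_use_req_slot(struct crypto_engine *pce,
				struct crypto_async_request *areq)
{
	struct qcrypto_req_control *preq;

	/* Claims a free slot lock-free via xchg() on in_use and bumps req_count. */
	preq = qcrypto_alloc_req_control(pce);
	if (!preq)
		return -EBUSY;	/* all pce->max_req slots are currently in use */

	preq->req = areq;
	/* ... the request would be issued to the CE through pce->qce here ... */

	/* Clears req/arsp, marks the slot free and decrements req_count. */
	qcrypto_free_req_control(pce, preq);
	return 0;
}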
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+			 unsigned int device)
+{
+	struct crypto_engine *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if (entry->ce_device == device)
+			break;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (((entry != NULL) && (entry->ce_device != device)) ||
+		(entry == NULL)) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+				device);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw
+			(struct crypto_priv *cp,
+			u32 device,
+			u32 hw_instance)
+{
+	struct crypto_engine *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if ((entry->ce_device == device) &&
+			(entry->ce_hw_instance == hw_instance))
+			break;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (((entry != NULL) &&
+		((entry->ce_device != device)
+		|| (entry->ce_hw_instance != hw_instance)))
+		|| (entry == NULL)) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+						 device);
+		return NULL;
+	}
+	return entry;
+}
+
+int qcrypto_get_num_engines(void)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	int count = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		count++;
+	}
+	return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	size_t arr_index = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		arr[arr_index].ce_device = entry->ce_device;
+		arr[arr_index].hw_instance = entry->ce_hw_instance;
+		arr_index++;
+		if (arr_index >= num_engines)
+			break;
+	}
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_AEAD = 2,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct skcipher_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	struct aead_alg aead_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+#define	QCRYPTO_CCM4309_NONCE_LEN	3
+
+struct qcrypto_cipher_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+
+	enum qce_hash_alg_enum  auth_alg; /* for aead */
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+	struct crypto_sync_skcipher *cipher_aes192_fb;
+
+	struct crypto_ahash *ahash_aead_aes192_fb;
+};
+
+struct qcrypto_resp_ctx {
+	struct list_head list;
+	struct llist_node llist;
+	struct crypto_async_request *async_req; /* async req */
+	int res;                                /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+	u8 *iv;
+	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+	unsigned int ivsize;
+	int  aead;
+	int  ccmtype;			/* default: 0, rfc4309: 1 */
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *adata;		/* Pointer to formatted assoc data */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr  */
+	struct scatterlist dsg;		/* Dest Data sg  */
+	struct scatterlist ssg;		/* Source Data sg  */
+	unsigned char *data;		/* Incoming data pointer*/
+
+	struct aead_request *aead_req;
+	struct ahash_request *fb_hash_req;
+	uint8_t	fb_ahash_digest[SHA256_DIGEST_SIZE];
+	struct scatterlist fb_ablkcipher_src_sg[2];
+	struct scatterlist fb_ablkcipher_dst_sg[2];
+	char *fb_aes_iv;
+	unsigned int  fb_ahash_length;
+	struct skcipher_request *fb_aes_req;
+	struct scatterlist *fb_aes_src;
+	struct scatterlist *fb_aes_dst;
+	unsigned int  fb_aes_cryptlen;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
+#define	MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define	COMPLETION_CB_BACKLOG_LENGTH_STOP 400
+#define	COMPLETION_CB_BACKLOG_LENGTH_START \
+			(COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
+
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+	enum qce_hash_alg_enum  alg;
+	uint32_t		diglen;
+	uint32_t		authkey_in_len;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+
+	struct scatterlist *src;
+	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer*/
+	unsigned char *data2;		/* Updated data pointer*/
+
+	uint32_t byte_count[4];
+	u64 count;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	 trailing_buf[SHA_MAX_BLOCK_SIZE];
+	uint32_t trailing_buf_len;
+
+	/* dma buffer, Internal use */
+	uint8_t	staging_dmabuf
+		[SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+	uint8_t	digest[SHA_MAX_DIGEST_SIZE];
+	struct scatterlist sg[2];
+};
+
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
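A small worked example (illustrative, not part of this commit) of the endianness handled by the two helpers above, using the SHA-1 initial vector defined earlier in this file:

/* Illustrative sketch only; not part of this commit. */
static void __maybe_unused _qcrypto_iv_endian_demo(void)
{
	uint32_t words[5] = {0};
	unsigned char bytes[SHA1_DIGEST_SIZE] = {0};

	_byte_stream_to_words(words, _std_init_vector_sha1_uint8,
			sizeof(_std_init_vector_sha1_uint8));
	/* words[] is now 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 */
	_words_to_byte_stream(words, bytes, sizeof(bytes));
	/* bytes[] again equals _std_init_vector_sha1_uint8 */
}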
+
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+				 bool high_bw_req)
+{
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned int control_flag;
+	int ret = 0;
+
+	if (cp->ce_support.req_bw_before_clk) {
+		if (high_bw_req)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (high_bw_req)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to enable clk\n", __func__);
+			return;
+		}
+		ret = icc_set_bw(pengine->icc_path,
+				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable to disable clk\n", __func__);
+			return;
+		}
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = icc_set_bw(pengine->icc_path,
+				CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return;
+		}
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to enable clk\n", __func__);
+			ret = icc_set_bw(pengine->icc_path, 0, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return;
+		}
+		ret = icc_set_bw(pengine->icc_path, 0, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable to enable clk\n", __func__);
+			return;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = icc_set_bw(pengine->icc_path, 0, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return;
+		}
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			ret = icc_set_bw(pengine->icc_path,
+					CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
+			if (ret)
+				pr_err("%s Unable to set high bw\n", __func__);
+			return;
+		}
+		break;
+	default:
+		return;
+	}
+}
+
+static void qcrypto_bw_reaper_timer_callback(struct timer_list *data)
+{
+	struct crypto_engine *pengine = from_timer(pengine, data,
+		bw_reaper_timer);
+
+	schedule_work(&pengine->bw_reaper_ws);
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+	pengine->bw_reaper_timer.expires = jiffies +
+			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+	mod_timer(&(pengine->bw_reaper_timer),
+		pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+	schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+					struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_allocate_ws);
+	unsigned long flags;
+	struct crypto_priv *cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qcrypto_ce_set_bus(pengine, true);
+	qcrypto_bw_set_timeout(pengine);
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_HAS_BANDWIDTH;
+	pengine->high_bw_req = false;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp, pengine);
+}
+
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_reaper_ws);
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	u32    active_seq;
+	bool restart = false;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	active_seq = pengine->active_seq;
+	if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+		(active_seq == pengine->last_active_seq)) {
+
+		/* check if engine is stuck */
+		if (atomic_read(&pengine->req_count) > 0) {
+			if (pengine->check_flag)
+				dev_warn(&pengine->pdev->dev,
+				"The engine appears to be stuck seq %d.\n",
+				active_seq);
+			pengine->check_flag = false;
+			goto ret;
+		}
+		pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		qcrypto_ce_set_bus(pengine, false);
+
+		spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req) {
+			/* we got request while we are disabling clock */
+			pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+
+			qcrypto_ce_set_bus(pengine, true);
+
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_HAS_BANDWIDTH;
+			pengine->high_bw_req = false;
+			restart = true;
+		} else
+			pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+ret:
+	pengine->last_active_seq = active_seq;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
+	if (pengine->bw_state != BUS_NO_BANDWIDTH)
+		qcrypto_bw_set_timeout(pengine);
+}
+
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct skcipher_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+		struct aead_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_AEAD;
+	q_alg->aead_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
+					struct qcrypto_alg *q_alg)
+{
+	if (!ctx || !q_alg) {
+		pr_err("ctx or q_alg is NULL\n");
+		return -EINVAL;
+	}
+	ctx->flags = 0;
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+	if (_qcrypto_init_assign) {
+		ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+		if (ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		ctx->pengine = NULL;
+	INIT_LIST_HEAD(&ctx->rsp_queue);
+	ctx->auth_alg = QCE_HASH_LAST;
+	return 0;
+}
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->flags = 0;
+	sha_ctx->ahash_req = NULL;
+	if (_qcrypto_init_assign) {
+		sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+		if (sha_ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		sha_ctx->pengine = NULL;
+	INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+	return 0;
+}
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _crypto_sha_hmac_ahash_req_complete(void *data, int err);
+#else
+static void _crypto_sha_hmac_ahash_req_complete(
+	    struct crypto_async_request *req, int err);
+#endif
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+}
+
+static int _qcrypto_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct qcrypto_alg *q_alg;
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_aes_skcipher_init(struct crypto_skcipher *tfm)
+{
+	const char *name = crypto_tfm_alg_name(&tfm->base);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+		ctx->cipher_aes192_fb = NULL;
+		return _qcrypto_skcipher_init(tfm);
+	}
+	ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->cipher_aes192_fb)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		ret = PTR_ERR(ctx->cipher_aes192_fb);
+		ctx->cipher_aes192_fb = NULL;
+		return ret;
+	}
+	return _qcrypto_skcipher_init(tfm);
+}
+
+static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *aeadalg = crypto_aead_alg(tfm);
+	struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
+						aead_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha1)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_sync_skcipher(
+							ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return 0;
+}
+
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha256)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_sync_skcipher(
+							ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return 0;
+}
+
+static void _qcrypto_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+}
+
+static void _qcrypto_aes_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	_qcrypto_skcipher_exit(tfm);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_sync_skcipher(ctx->cipher_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+}
+
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_sync_skcipher(ctx->cipher_aes192_fb);
+	if (ctx->ahash_aead_aes192_fb)
+		crypto_free_ahash(ctx->ahash_aead_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+}
+
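+/* Format driver and per-engine statistics into _debug_read_buf for debugfs. */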
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	pstat = &_qcrypto_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI crypto accelerator %d Statistics\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER AES encryption          : %llu\n",
+					pstat->sk_cipher_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER AES decryption          : %llu\n",
+					pstat->sk_cipher_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER DES encryption          : %llu\n",
+					pstat->sk_cipher_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER DES decryption          : %llu\n",
+					pstat->sk_cipher_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER 3DES encryption         : %llu\n",
+					pstat->sk_cipher_3des_enc);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER 3DES decryption         : %llu\n",
+					pstat->sk_cipher_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER operation success       : %llu\n",
+					pstat->sk_cipher_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   SK CIPHER operation fail          : %llu\n",
+					pstat->sk_cipher_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption            : %llu\n",
+					pstat->aead_sha1_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption            : %llu\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption            : %llu\n",
+					pstat->aead_sha1_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption            : %llu\n",
+					pstat->aead_sha1_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption           : %llu\n",
+					pstat->aead_sha1_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption           : %llu\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES encryption          : %llu\n",
+					pstat->aead_sha256_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES decryption          : %llu\n",
+					pstat->aead_sha256_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES encryption          : %llu\n",
+					pstat->aead_sha256_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES decryption          : %llu\n",
+					pstat->aead_sha256_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES encryption         : %llu\n",
+					pstat->aead_sha256_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES decryption         : %llu\n",
+					pstat->aead_sha256_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES encryption             : %llu\n",
+					pstat->aead_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES decryption             : %llu\n",
+					pstat->aead_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES encryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES decryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success              : %llu\n",
+					pstat->aead_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail                 : %llu\n",
+					pstat->aead_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD bad message                    : %llu\n",
+					pstat->aead_bad_msg);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 digest                   : %llu\n",
+					pstat->sha1_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 digest                 : %llu\n",
+					pstat->sha256_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 HMAC digest              : %llu\n",
+					pstat->sha1_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 HMAC digest            : %llu\n",
+					pstat->sha256_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation success             : %llu\n",
+					pstat->ahash_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation fail                : %llu\n",
+					pstat->ahash_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   resp start, resp stop, max rsp qlen, reorder-cnt : %u %u %u %u\n",
+					cp->resp_start, cp->resp_stop,
+					cp->max_resp_qlen, cp->max_reorder_cnt);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   max queue length, no avail          : %u %u\n",
+					cp->max_qlen, cp->no_avail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   work queue                          : %u %u %u\n",
+					cp->queue_work_eng3,
+					cp->queue_work_not_eng3,
+					cp->queue_work_not_eng3_nz);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req max %d          : %llu\n",
+			pe->unit,
+			pe->max_req_used,
+			pe->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error               : %llu\n",
+			pe->unit,
+			pe->err_req
+		);
+		qce_get_driver_stats(pe->qce);
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	for (i = 0; i < MAX_SMP_CPU+1; i++)
+		if (cp->cpu_req[i])
+			len += scnprintf(
+				_debug_read_buf + len,
+				DEBUG_MAX_RW_BUF - len - 1,
+				"CPU %d Issue Req                     : %d\n",
+				i, cp->cpu_req[i]);
+	return len;
+}
+
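+/*
+ * Tear down one engine: unlink it from the engine list, cancel its bandwidth
+ * work and timer, release its interconnect path, and when the last engine is
+ * gone, unregister every algorithm this driver registered with the crypto API.
+ */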
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+	unsigned long flags;
+	struct crypto_engine *pe;
+
+	cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_del(&pengine->elist);
+	if (pengine->first_engine) {
+		cp->first_engine = NULL;
+		pe = list_first_entry(&cp->engine_list, struct crypto_engine,
+								elist);
+		if (pe) {
+			pe->first_engine = true;
+			cp->first_engine = pe;
+		}
+	}
+	if (cp->next_engine == pengine)
+		cp->next_engine = NULL;
+	if (cp->scheduled_eng == pengine)
+		cp->scheduled_eng = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	cp->total_units--;
+
+	cancel_work_sync(&pengine->bw_reaper_ws);
+	cancel_work_sync(&pengine->bw_allocate_ws);
+	del_timer_sync(&pengine->bw_reaper_timer);
+
+	if (pengine->icc_path)
+		icc_put(pengine->icc_path);
+	pengine->icc_path = NULL;
+
+	kfree_sensitive(pengine->preq_pool);
+
+	if (cp->total_units)
+		return;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_skcipher(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+			crypto_unregister_aead(&q_alg->aead_alg);
+		list_del(&q_alg->entry);
+		kfree_sensitive(q_alg);
+	}
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return 0;
+	cp = pengine->pcp;
+	mutex_lock(&cp->engine_lock);
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kfree_sensitive(pengine);
+	return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_priv *cp, unsigned int len)
+{
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+		else
+			return -EINVAL;
+	default:
+		//crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_skcipher *tfm,
+		const u8 *key)
+{
+	//struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int ret;
+
+	ctx->enc_key_len = AES_KEYSIZE_192;
+
+	crypto_sync_skcipher_clear_flags(ctx->cipher_aes192_fb,
+		CRYPTO_TFM_REQ_MASK);
+	crypto_sync_skcipher_set_flags(ctx->cipher_aes192_fb,
+		(crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK));
+
+	ret = crypto_sync_skcipher_setkey(ctx->cipher_aes192_fb, key,
+		AES_KEYSIZE_192);
+	/*
+	 * TODO: delete or find equivalent in new crypto_skcipher api
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	*/
+	return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_skcipher *tfm, const u8 *key,
+		unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+
+	if ((keylen == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+					&& ctx->cipher_aes192_fb)
+		return _qcrypto_setkey_aes_192_fallback(tfm, key);
+
+	if (_qcrypto_check_aes_keylen(cp, keylen))
+		return -EINVAL;
+
+	ctx->enc_key_len = keylen;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, keylen);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_xts(struct crypto_skcipher *tfm,
+		const u8 *key, unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+	if (_qcrypto_check_aes_keylen(cp, keylen/2))
+		return -EINVAL;
+
+	ctx->enc_key_len = keylen;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, keylen);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_des(struct crypto_skcipher *tfm, const u8 *key,
+		unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct des_ctx dctx;
+
+	if (!key) {
+		pr_err("%s Invalid key pointer\n", __func__);
+		return -EINVAL;
+	}
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for DES algorithm\n", __func__);
+		return 0;
+	}
+
+	if (keylen != DES_KEY_SIZE) {
+		//crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	memset(&dctx, 0, sizeof(dctx));
+	/*
+	 * TODO: weak-key checking is disabled because des_expand_key() failed
+	 * to compile against this kernel; re-enable once that is resolved.
+	if (des_expand_key(&dctx, key, keylen) == -ENOKEY) {
+		if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+			return -EINVAL;
+		else
+			return 0;
+	}
+	*/
+
+	/*
+	 * TODO: delete or find equivalent in the skcipher API
+	if (ret) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+	 */
+
+	ctx->enc_key_len = keylen;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+		memcpy(ctx->enc_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_setkey_3des(struct crypto_skcipher *tfm, const u8 *key,
+		unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for 3DES algorithm\n", __func__);
+		return 0;
+	}
+	if (keylen != DES3_EDE_KEY_SIZE) {
+		//crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, keylen);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
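+/*
+ * Response work handler: drain cp->ordered_resp_list and complete requests in
+ * submission order. When the completion backlog drops below the restart
+ * threshold, resume issuing requests on any available engine.
+ */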
+static void seq_response(struct work_struct *work)
+{
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							 resp_work);
+	struct llist_node *list;
+	struct llist_node *rev = NULL;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	int total_unit;
+
+again:
+	list = llist_del_all(&cp->ordered_resp_list);
+
+	if (!list)
+		goto end;
+
+	while (list) {
+		struct llist_node *t = list;
+
+		list = llist_next(list);
+		t->next = rev;
+		rev = t;
+	}
+
+	while (rev) {
+		struct qcrypto_resp_ctx *arsp;
+		struct crypto_async_request *areq;
+
+		arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+		rev = llist_next(rev);
+
+		areq = arsp->async_req;
+		local_bh_disable();
+		areq->complete(areq, arsp->res);
+		local_bh_enable();
+		atomic_dec(&cp->resp_cnt);
+	}
+
+	if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
+		(cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
+						== STOPPED)) {
+		cp->resp_start++;
+		for (total_unit = cp->total_units; total_unit-- > 0;) {
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine = _avail_eng(cp);
+			spin_unlock_irqrestore(&cp->lock, flags);
+			if (pengine)
+				_start_qcrypto_process(cp, pengine);
+			else
+				break;
+		}
+	}
+end:
+	if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+				IS_SCHEDULED) == SCHEDULE_AGAIN)
+		goto again;
+	else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+				NOT_SCHEDULED) == SCHEDULE_AGAIN)
+		goto end;
+}
+
+#define SCHEDULE_RSP_QLEN_THRESHOLD 64
+
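+/*
+ * Move finished responses from the tfm's response queue to the global ordered
+ * completion list (preserving submission order), throttle request issuing when
+ * the completion backlog grows too long, and schedule the response work queue.
+ */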
+static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
+					void *tfm_ctx,
+					struct qcrypto_resp_ctx *cur_arsp,
+					int res)
+{
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	struct qcrypto_resp_ctx *arsp;
+	struct list_head *plist;
+	unsigned int resp_qlen;
+	unsigned int cnt = 0;
+
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cur_arsp->res = res;
+	while (!list_empty(plist)) {
+		arsp = list_first_entry(plist,
+				struct qcrypto_resp_ctx, list);
+		if (arsp->res == -EINPROGRESS)
+			break;
+		list_del(&arsp->list);
+		llist_add(&arsp->llist, &cp->ordered_resp_list);
+		atomic_inc(&cp->resp_cnt);
+		cnt++;
+	}
+	resp_qlen = atomic_read(&cp->resp_cnt);
+	if (resp_qlen > cp->max_resp_qlen)
+		cp->max_resp_qlen = resp_qlen;
+	if (cnt > cp->max_reorder_cnt)
+		cp->max_reorder_cnt = cnt;
+	if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
+		cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
+						STOPPED) == IN_PROGRESS) {
+		cp->resp_stop++;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+	if (!llist_empty(&cp->ordered_resp_list)) {
+		unsigned int cpu;
+
+		if (pengine->first_engine) {
+			cpu = WORK_CPU_UNBOUND;
+			cp->queue_work_eng3++;
+		} else {
+			cp->queue_work_not_eng3++;
+			cpu = cp->cpu_getting_irqs_frm_first_ce;
+			/*
+			 * If source not the first engine, and there
+			 * are outstanding requests going on first engine,
+			 * skip scheduling of work queue to anticipate
+			 * more may be coming. If the response queue
+			 * length exceeds threshold, to avoid further
+			 * delay, schedule work queue immediately.
+			 */
+			if (cp->first_engine && atomic_read(
+						&cp->first_engine->req_count)) {
+				if (resp_qlen < SCHEDULE_RSP_QLEN_THRESHOLD)
+					return;
+				cp->queue_work_not_eng3_nz++;
+			}
+		}
+		if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+					IS_SCHEDULED) == NOT_SCHEDULED)
+			queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
+		else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+					SCHEDULE_AGAIN) == NOT_SCHEDULED)
+			goto retry;
+	}
+}
+
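+/*
+ * Common completion path for all QCE callbacks: free the request control
+ * block, note which CPU services the first engine's interrupts, hand the
+ * result to _qcrypto_tfm_complete(), and try to issue the next request.
+ */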
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_engine *pengine;
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp;
+	struct qcrypto_resp_ctx *arsp;
+	u32 type = 0;
+	void *tfm_ctx = NULL;
+	unsigned int cpu;
+	int res;
+
+	pengine = pqcrypto_req_control->pce;
+	cp = pengine->pcp;
+	areq = pqcrypto_req_control->req;
+	arsp = pqcrypto_req_control->arsp;
+	res = pqcrypto_req_control->res;
+	qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+	if (areq) {
+		type = crypto_tfm_alg_type(areq->tfm);
+		tfm_ctx = crypto_tfm_ctx(areq->tfm);
+	}
+	cpu = smp_processor_id();
+	pengine->irq_cpu = cpu;
+	if (pengine->first_engine) {
+		if (cpu  != cp->cpu_getting_irqs_frm_first_ce)
+			cp->cpu_getting_irqs_frm_first_ce = cpu;
+	}
+	if (areq)
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
+	if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
+		_start_qcrypto_process(cp, pengine);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "%s: %pK ret %d\n",
+				__func__, areq, ret);
+#endif
+	if (digest) {
+		memcpy(rctx->digest, digest, diglen);
+		if (rctx->last_blk)
+			memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		rctx->byte_count[0] = auth32[0];
+		rctx->byte_count[1] = auth32[1];
+		rctx->byte_count[2] = auth32[2];
+		rctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	rctx->last_blk = 0;
+	rctx->first_blk = 0;
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ahash_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ahash_op_success++;
+	}
+	if (cp->ce_support.aligned_only)  {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_sk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct skcipher_request *areq = (struct skcipher_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = skcipher_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "%s: %pK ret %d\n",
+			__func__, areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_skcipher_ivsize(tfm));
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->sk_cipher_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->sk_cipher_op_success++;
+	}
+
+	if (cp->ce_support.aligned_only)  {
+		struct qcrypto_cipher_req_ctx *rctx;
+		uint32_t num_sg = 0;
+		uint32_t bytes = 0;
+
+		rctx = skcipher_request_ctx(areq);
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
+		num_sg = qcrypto_count_sg(areq->dst, areq->cryptlen);
+		bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+			rctx->data, areq->cryptlen);
+		if (bytes != areq->cryptlen)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n",
+				bytes, areq->cryptlen);
+		kfree_sensitive(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kfree_sensitive(rctx->adata);
+	} else {
+		uint32_t ivsize = crypto_aead_ivsize(aead);
+
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen + areq->assoclen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->assoclen +
+					areq->cryptlen - ctx->authsize,
+					ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, ivsize);
+	}
+
+	if (ret == (-EBADMSG))
+		pstat->aead_bad_msg++;
+	else if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	pqcrypto_req_control->res = ret;
+	req_done(pqcrypto_req_control);
+}
+
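+/* Encode msglen big-endian into csize bytes at block (CCM B0 length field). */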
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
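+/*
+ * Build the CCM nonce/B0 flags from the request IV per RFC 3610 and
+ * NIST SP 800-38C, then append the encoded message length.
+ */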
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen)
+{
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (assoclen)
+		qreq->nonce[0] |= 64;
+
+	if (i > MAX_NONCE)
+		return -EINVAL;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
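+/*
+ * Prefix the associated data with its CCM length encoding and copy it from
+ * the scatterlist into the flat adata buffer; qreq->assoclen is set to the
+ * 16-byte-aligned formatted length.
+ */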
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+				struct scatterlist *sg, unsigned char *adata)
+{
+	uint32_t len;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else {
+		if ((alen >= 65280) && (alen <= 0xffffffff)) {
+			*(__be16 *)adata = cpu_to_be16(0xfffe);
+			*(__be32 *)&adata[2] = cpu_to_be32(alen);
+			len = 6;
+		} else {
+			*(__be16 *)adata = cpu_to_be16(0xffff);
+			*(__be32 *)&adata[6] = cpu_to_be32(alen);
+			len = 10;
+		}
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
+
+	num_sg = qcrypto_count_sg(sg, alen);
+	bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+	if (bytes != alen)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes, alen);
+
+	return 0;
+}
+
+static int _qcrypto_process_skcipher(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct skcipher_request *req;
+	struct crypto_skcipher *tfm;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct skcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = skcipher_request_ctx(req);
+	rctx->pengine = pengine;
+	tfm = crypto_skcipher_reqtfm(req);
+	if (pengine->pcp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		uint32_t num_sg = 0;
+
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->cryptlen + 64), GFP_ATOMIC);
+		if (rctx->data == NULL)
+			return -ENOMEM;
+		num_sg = qcrypto_count_sg(req->src, req->cryptlen);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+								req->cryptlen);
+		if (bytes != req->cryptlen)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n",
+							bytes, req->cryptlen);
+		sg_set_buf(&rctx->dsg, rctx->data, req->cryptlen);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->iv;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER; //TODO: change name in qcedev.h
+	qreq.qce_cb = _qce_sk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->iv;
+	qreq.ivsize = crypto_skcipher_ivsize(tfm);
+	qreq.cryptlen = req->cryptlen;
+	qreq.use_pmem = 0;
+	qreq.flags = cipher_ctx->flags;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(pengine->pcp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret = qce_ablk_cipher_req(pengine->qce, &qreq); //TODO: rename to match skcipher terminology
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_req_ctx *rctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req,
+				struct ahash_request, base);
+	rctx = ahash_request_ctx(req);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx->pengine = pengine;
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &rctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = rctx->byte_count[0];
+	sreq.auth_data[1] = rctx->byte_count[1];
+	sreq.auth_data[2] = rctx->byte_count[2];
+	sreq.auth_data[3] = rctx->byte_count[3];
+	sreq.first_blk = rctx->first_blk;
+	sreq.last_blk = rctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+	sreq.flags = sha_ctx->flags;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg);
+		/* return here so the error is not overwritten by issuing the request */
+		return -EINVAL;
+	}
+	ret = qce_process_sha_req(pengine->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct aead_request, base);
+	aead = crypto_aead_reqtfm(req);
+	rctx = aead_request_ctx(req);
+	rctx->pengine = pengine;
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.auth_alg = cipher_ctx->auth_alg;
+	if (qreq.mode == QCE_MODE_CCM)
+		qreq.ivsize =  AES_BLOCK_SIZE;
+	else
+		qreq.ivsize =  crypto_aead_ivsize(aead);
+	qreq.flags = cipher_ctx->flags;
+
+	if (qreq.mode == QCE_MODE_CCM) {
+		uint32_t assoclen;
+
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+
+		/* if rfc4309 ccm, adjust assoclen */
+		assoclen = req->assoclen;
+		if (rctx->ccmtype)
+			assoclen -= 8;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen);
+		if (ret)
+			return ret;
+
+		if (assoclen) {
+			rctx->adata = kzalloc((assoclen + 0x64),
+								GFP_ATOMIC);
+			if (!rctx->adata)
+				return -ENOMEM;
+			/* Format Associated data    */
+			ret = qcrypto_aead_ccm_format_adata(&qreq,
+						assoclen,
+						req->src,
+						rctx->adata);
+		} else {
+			qreq.assoclen = 0;
+			rctx->adata = NULL;
+		}
+		if (ret) {
+			kfree_sensitive(rctx->adata);
+			return ret;
+		}
+
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		qreq.asg = &rctx->asg;
+		if (rctx->adata)
+			sg_set_buf(qreq.asg, rctx->adata,
+					qreq.assoclen);
+		sg_mark_end(qreq.asg);
+	}
+	ret =  qce_aead_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
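+/* Statically bind a tfm to an engine, rotating through engines round-robin. */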
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp)
+{
+	struct crypto_engine *pengine;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->next_engine)
+		pengine = cp->next_engine;
+	else
+		pengine = list_first_entry(&cp->engine_list,
+				struct crypto_engine, elist);
+
+	if (list_is_last(&pengine->elist, &cp->engine_list))
+		cp->next_engine = list_first_entry(
+			&cp->engine_list, struct crypto_engine, elist);
+	else
+		cp->next_engine = list_next_entry(pengine, elist);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return pengine;
+}
+
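+/*
+ * Dequeue the next request (engine queue first, then the driver queue), track
+ * it with a request control block and the tfm's response queue, and dispatch
+ * it to the engine. On dispatch failure the error is reported through the
+ * normal completion path and the next request is attempted.
+ */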
+static int _start_qcrypto_process(struct crypto_priv *cp,
+				struct crypto_engine *pengine)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog_eng = NULL;
+	struct crypto_async_request *backlog_cp = NULL;
+	unsigned long flags;
+	u32 type;
+	int ret = 0;
+	struct crypto_stat *pstat;
+	void *tfm_ctx;
+	struct qcrypto_cipher_req_ctx *cipher_rctx;
+	struct qcrypto_sha_req_ctx *ahash_rctx;
+	struct skcipher_request *skcipher_req;
+	struct ahash_request *ahash_req;
+	struct aead_request *aead_req;
+	struct qcrypto_resp_ctx *arsp;
+	struct qcrypto_req_control *pqcrypto_req_control;
+	unsigned int cpu = MAX_SMP_CPU;
+
+	if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED)
+		return 0;
+
+	if (in_interrupt()) {
+		cpu = smp_processor_id();
+		if (cpu >= MAX_SMP_CPU)
+			cpu = MAX_SMP_CPU - 1;
+	} else
+		cpu = MAX_SMP_CPU;
+
+	pstat = &_qcrypto_stat;
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->issue_req ||
+		atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+	/* make sure it is in high bandwidth state */
+	if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* try to get request from request queue of the engine first */
+	async_req = crypto_dequeue_request(&pengine->req_queue);
+	if (!async_req) {
+		/*
+		 * if no request from the engine,
+		 * try to  get from request queue of driver
+		 */
+		backlog_cp = crypto_get_backlog(&cp->req_queue);
+		async_req = crypto_dequeue_request(&cp->req_queue);
+		if (!async_req) {
+			spin_unlock_irqrestore(&cp->lock, flags);
+			return 0;
+		}
+	}
+	pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("Allocation of request failed\n");
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* add associated rsp entry to tfm response queue */
+	type = crypto_tfm_alg_type(async_req->tfm);
+	tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		ahash_req = container_of(async_req,
+			struct ahash_request, base);
+		ahash_rctx = ahash_request_ctx(ahash_req);
+		arsp = &ahash_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_sha_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		skcipher_req = container_of(async_req,
+			struct skcipher_request, base);
+		cipher_rctx = skcipher_request_ctx(skcipher_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		aead_req = container_of(async_req,
+			struct aead_request, base);
+		cipher_rctx = aead_request_ctx(aead_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	}
+
+	arsp->res = -EINPROGRESS;
+	arsp->async_req = async_req;
+	pqcrypto_req_control->pce = pengine;
+	pqcrypto_req_control->req = async_req;
+	pqcrypto_req_control->arsp = arsp;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+
+	pengine->issue_req = true;
+	cp->cpu_req[cpu]++;
+	smp_mb(); /* make it visible */
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (backlog_eng)
+		backlog_eng->complete(backlog_eng, -EINPROGRESS);
+	if (backlog_cp)
+		backlog_cp->complete(backlog_cp, -EINPROGRESS);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		ret = _qcrypto_process_skcipher(pengine, pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pengine->issue_req = false;
+	smp_mb(); /* make it visible */
+
+	pengine->total_req++;
+	if (ret) {
+		pengine->err_req++;
+		qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
+			pstat->sk_cipher_op_fail++;
+		else
+			if (type == CRYPTO_ALG_TYPE_AHASH)
+				pstat->ahash_op_fail++;
+			else
+				pstat->aead_op_fail++;
+
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
+		goto again;
+	}
+	return ret;
+}
+
+static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
+		struct crypto_engine *p)
+{
+
+	if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
+		p =  list_first_entry(&cp->engine_list, struct crypto_engine,
+			elist);
+	else
+		p = list_entry(p->elist.next, struct crypto_engine, elist);
+	return p;
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+	/* call this function with spinlock set */
+	struct crypto_engine *q = NULL;
+	struct crypto_engine *p = cp->scheduled_eng;
+	struct crypto_engine *q1;
+	int eng_cnt = cp->total_units;
+
+	if (unlikely(list_empty(&cp->engine_list))) {
+		pr_err("%s: no valid ce to schedule\n", __func__);
+		return NULL;
+	}
+
+	p = _next_eng(cp, p);
+	q1 = p;
+	while (eng_cnt-- > 0) {
+		if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
+			q = p;
+			break;
+		}
+		p = _next_eng(cp, p);
+		if (q1 == p)
+			break;
+	}
+	cp->scheduled_eng = q;
+	return q;
+}
+
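+/*
+ * Queue a request on the given engine, or on the driver queue if no engine is
+ * specified, then pick an available engine and start processing once it has
+ * bus bandwidth.
+ */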
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_engine *pengine,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	if (pengine) {
+		ret = crypto_enqueue_request(&pengine->req_queue, req);
+	} else {
+		ret = crypto_enqueue_request(&cp->req_queue, req);
+		pengine = _avail_eng(cp);
+		if (cp->req_queue.qlen > cp->max_qlen)
+			cp->max_qlen = cp->req_queue.qlen;
+	}
+	if (pengine) {
+		switch (pengine->bw_state) {
+		case BUS_NO_BANDWIDTH:
+			if (!pengine->high_bw_req) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+			pengine = NULL;
+			break;
+		case BUS_HAS_BANDWIDTH:
+			break;
+		case BUS_BANDWIDTH_RELEASING:
+			pengine->high_bw_req = true;
+			pengine = NULL;
+			break;
+		case BUS_BANDWIDTH_ALLOCATING:
+			pengine = NULL;
+			break;
+		case BUS_SUSPENDED:
+		case BUS_SUSPENDING:
+		default:
+			pengine = NULL;
+			break;
+		}
+	} else {
+		cp->no_avail++;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
+		_start_qcrypto_process(cp, pengine);
+	return ret;
+}
+
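+/*
+ * Some crypto engines lack AES-192 support; these helpers run the request
+ * synchronously on the software fallback tfm allocated at init time.
+ */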
+static int _qcrypto_enc_aes_192_fallback(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err;
+
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_sync_tfm(subreq, ctx->cipher_aes192_fb);
+
+	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+		req->cryptlen, req->iv);
+	err = crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err;
+
+	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_sync_tfm(subreq, ctx->cipher_aes192_fb);
+
+	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+		req->cryptlen, req->iv);
+	err = crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+
+static int _qcrypto_enc_aes_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_ctr(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->sk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_xts(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+	struct crypto_priv *cp = ctx->cp;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->sk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
+
+	pstat->aead_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L -1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ctr(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/* Note. There is no such thing as aes/counter mode, decrypt */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->sk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_ecb(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->sk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_cbc(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->sk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_xts(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER);
+	rctx = skcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->sk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+	rctx->ccmtype = 0;
+
+	pstat->aead_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
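+	/*
+	 * Build the RFC 4309 CCM nonce block: flags byte L - 1 (L = 4),
+	 * the 3-byte salt saved at setkey time, then the 8-byte IV from
+	 * the request; the remaining bytes stay zero.
+	 */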
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L -1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->ccmtype = 1;
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+	int ret;
+
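+	/*
+	 * The authenc() key blob begins with an rtattr holding the cipher
+	 * key length, followed by the authentication key and then the
+	 * cipher key; split it into ctx->auth_key and ctx->enc_key.
+	 */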
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+			ctx->ahash_aead_aes192_fb) {
+		crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
+		ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
+					ctx->auth_key, ctx->auth_key_len);
+		if (ret)
+			goto badkey;
+		crypto_sync_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
+		ret = crypto_sync_skcipher_setkey(ctx->cipher_aes192_fb,
+					ctx->enc_key, ctx->enc_key_len);
+		if (ret)
+			goto badkey;
+	}
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	//crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+		ctx->enc_key_len = 0;
+		return -EINVAL;
+	default:
+		ctx->enc_key_len = 0;
+		//crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+				 const u8 *key, unsigned int key_len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+		return -EINVAL;
+	key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+	memcpy(ctx->ccm4309_nonce, key + key_len,  QCRYPTO_CCM4309_NONCE_LEN);
+	ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+	return ret;
+}
+
+static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
+								int res)
+{
+	struct aead_request *req;
+	struct crypto_async_request *areq;
+
+	req = rctx->aead_req;
+	areq = &req->base;
+	if (rctx->fb_aes_req)
+		skcipher_request_free(rctx->fb_aes_req);
+	if (rctx->fb_hash_req)
+		ahash_request_free(rctx->fb_hash_req);
+	rctx->fb_aes_req = NULL;
+	rctx->fb_hash_req = NULL;
+	kfree(rctx->fb_aes_iv);
+	areq->complete(areq, res);
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _aead_aes_fb_stage2_ahash_complete(void *data, int err)
+#else
+static void _aead_aes_fb_stage2_ahash_complete(
+		struct crypto_async_request *base, int err)
+#endif
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+	rctx = data;
+#else
+	rctx = base->data;
+#endif
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	/* copy icv */
+	if (err == 0)
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct ahash_request *ahash_req;
+
+	ahash_req = rctx->fb_hash_req;
+	ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 _aead_aes_fb_stage2_ahash_complete, rctx);
+
+	return crypto_ahash_digest(ahash_req);
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _aead_aes_fb_stage2_decrypt_complete(void *data, int err)
+#else
+static void _aead_aes_fb_stage2_decrypt_complete(
+		struct crypto_async_request *base, int err)
+#endif
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+	rctx = data;
+#else
+	rctx = base->data;
+#endif
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_decrypt(
+					struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct skcipher_request *aes_req;
+
+	aes_req = rctx->fb_aes_req;
+	skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage2_decrypt_complete, rctx);
+	return crypto_skcipher_decrypt(aes_req);
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _aead_aes_fb_stage1_ahash_complete(void *data, int err)
+#else
+static void _aead_aes_fb_stage1_ahash_complete(
+		struct crypto_async_request *base, int err)
+#endif
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+	rctx = data;
+#else
+	rctx = base->data;
+#endif
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	/* compare icv */
+	if (err == 0) {
+		unsigned char *tmp;
+
+		tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+		if (!tmp) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
+			req->cryptlen - ctx->authsize, ctx->authsize, 0);
+		if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
+			err = -EBADMSG;
+		kfree(tmp);
+	}
+ret:
+	if (err)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	else {
+		err = _start_aead_aes_fb_stage2_decrypt(rctx);
+		if (err != -EINPROGRESS &&  err != -EBUSY)
+			_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	}
+
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _aead_aes_fb_stage1_encrypt_complete(void *data, int err)
+#else
+static void _aead_aes_fb_stage1_encrypt_complete(
+		struct crypto_async_request *base, int err)
+#endif
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+	rctx = data;
+#else
+	rctx = base->data;
+#endif
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+
+	if (err) {
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+		return;
+	}
+
+	err = _start_aead_aes_fb_stage2_hmac(rctx);
+
+	/* copy icv */
+	if (err == 0) {
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	}
+	if (err != -EINPROGRESS &&  err != -EBUSY)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
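+/*
+ * AES-192 AEAD software fallback (the engine lacks 192-bit key support):
+ * encryption runs the fallback skcipher first, then HMACs the ciphertext in
+ * stage 2; decryption verifies the HMAC first and only then decrypts.
+ * Either stage may finish asynchronously via the _aead_aes_fb_stage*
+ * completion callbacks above.
+ */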
+static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
+							bool is_encrypt)
+{
+	int rc = -EINVAL;
+	struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
+	struct skcipher_request *aes_req = NULL;
+	struct ahash_request *ahash_req = NULL;
+	int nbytes;
+	struct scatterlist *src, *dst;
+
+	rctx->fb_aes_iv = NULL;
+	aes_req = skcipher_request_alloc(&ctx->cipher_aes192_fb->base,
+						GFP_KERNEL);
+	if (!aes_req)
+		return -ENOMEM;
+	ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
+	if (!ahash_req)
+		goto ret;
+	rctx->fb_aes_req = aes_req;
+	rctx->fb_hash_req = ahash_req;
+	rctx->aead_req = req;
+	/* assoc data and IV sit at the beginning of both the src and dst sg lists */
+	src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
+				req->assoclen);
+	dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
+				req->assoclen);
+
+	nbytes = req->cryptlen;
+	if (!is_encrypt)
+		nbytes -=  ctx->authsize;
+	rctx->fb_ahash_length = nbytes +  req->assoclen;
+	rctx->fb_aes_src = src;
+	rctx->fb_aes_dst = dst;
+	rctx->fb_aes_cryptlen = nbytes;
+	rctx->ivsize = crypto_aead_ivsize(aead_tfm);
+	rctx->fb_aes_iv = kmemdup(req->iv, rctx->ivsize, GFP_ATOMIC);
+	if (!rctx->fb_aes_iv)
+		goto ret;
+	skcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
+					rctx->fb_aes_dst,
+					rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
+	if (is_encrypt)
+		ahash_request_set_crypt(ahash_req, req->dst,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+	else
+		ahash_request_set_crypt(ahash_req, req->src,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+
+	if (is_encrypt) {
+
+		skcipher_request_set_callback(aes_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage1_encrypt_complete, rctx);
+
+		rc = crypto_skcipher_encrypt(aes_req);
+		if (rc == 0) {
+			memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+			rc = _start_aead_aes_fb_stage2_hmac(rctx);
+			if (rc == 0) {
+				/* copy icv */
+				scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+			}
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+
+	} else {
+		ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_aead_aes_fb_stage1_ahash_complete, rctx);
+
+		rc = crypto_ahash_digest(ahash_req);
+		if (rc == 0) {
+			unsigned char *tmp;
+
+			tmp = kmalloc(ctx->authsize, GFP_KERNEL);
+			if (!tmp) {
+				rc = -ENOMEM;
+				goto ret;
+			}
+			/* compare icv */
+			scatterwalk_map_and_copy(tmp,
+				src, req->cryptlen - ctx->authsize,
+				ctx->authsize, 0);
+			if (memcmp(rctx->fb_ahash_digest, tmp,
+							ctx->authsize) != 0)
+				rc = -EBADMSG;
+			else
+				rc = _start_aead_aes_fb_stage2_decrypt(rctx);
+			kfree(tmp);
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+	}
+ret:
+	if (aes_req)
+		skcipher_request_free(aes_req);
+	if (ahash_req)
+		ahash_request_free(ahash_req);
+	kfree(rctx->fb_aes_iv);
+	return rc;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_enc++;
+	else
+		pstat->aead_sha256_aes_enc++;
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, true);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_dec++;
+	else
+		pstat->aead_sha256_aes_dec++;
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, false);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_enc++;
+	else
+		pstat->aead_sha256_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_dec++;
+	else
+		pstat->aead_sha256_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_enc++;
+	else
+		pstat->aead_sha256_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_dec++;
+	else
+		pstat->aead_sha256_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->first_blk = 1;
+	rctx->last_blk = 0;
+	rctx->byte_count[0] = 0;
+	rctx->byte_count[1] = 0;
+	rctx->byte_count[2] = 0;
+	rctx->byte_count[3] = 0;
+	rctx->trailing_buf_len = 0;
+	rctx->count = 0;
+
+	return 0;
+}
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	pstat->sha1_digest++;
+	return 0;
+}
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	pstat->sha256_digest++;
+	return 0;
+}
+
+
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+	memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha1_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
+static int __sha1_import_common(struct ahash_request  *req, const void *in,
+				bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC the hardware pads the first block, so the byte
+		 * count is incremented by 64 (HMAC_PADDING) after the first
+		 * operation completes.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+	return 0;
+}
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+	memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha256_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request  *req, const void *in,
+			bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC the hardware pads the first block, so the byte
+		 * count is incremented by 64 (HMAC_PADDING) after the first
+		 * operation completes.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+
+	return 0;
+}
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, true);
+}
+
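+/*
+ * For engines that only accept aligned buffers, copy the scattered request
+ * data into one contiguous allocation and point req->src at it; the original
+ * scatterlist is kept in srctx->orig_src.
+ */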
+static int _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+	if (srctx->data == NULL) {
+		pr_err("Mem alloc failed for srctx->data, size 0x%x\n",
+				req->nbytes + 64);
+		return -ENOMEM;
+	}
+
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+	bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+						req->nbytes);
+	if (bytes != req->nbytes)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes,
+							req->nbytes);
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+
+	return 0;
+}
+
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, num_sg;
+	struct scatterlist *sg_last;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes;
+	uint32_t offset = 0;
+	uint32_t bytes = 0;
+	uint8_t  *staging;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + rctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+		num_sg = qcrypto_count_sg(req->src, len);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+		rctx->trailing_buf_len = total;
+		return 0;
+	}
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	k_src = &rctx->trailing_buf[0];
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len =  sha_block_size - sha_pad_len;
+	offset = req->nbytes - trailing_buf_len;
+
+	if (offset != req->nbytes)
+		scatterwalk_map_and_copy(k_src, req->src, offset,
+						trailing_buf_len, 0);
+
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = rctx->trailing_buf_len;
+	sg_last = req->src;
+
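+	/*
+	 * Find the last scatterlist entry wholly consumed by this pass
+	 * (nbytes covers only whole blocks); the chain is terminated there,
+	 * and the leftover bytes were already copied to the trailing buffer.
+	 */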
+	while (len < nbytes) {
+		if ((len + sg_last->length) > nbytes)
+			break;
+		len += sg_last->length;
+		sg_last = sg_next(sg_last);
+	}
+	if (rctx->trailing_buf_len) {
+		if (cp->ce_support.aligned_only)  {
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+			if (rctx->data2 == NULL)
+				return -ENOMEM;
+			memcpy(rctx->data2, staging,
+						rctx->trailing_buf_len);
+			memcpy((rctx->data2 + rctx->trailing_buf_len),
+					rctx->data, req->src->length);
+			kfree_sensitive(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&rctx->sg[0], rctx->data,
+					(rctx->trailing_buf_len +
+							req->src->length));
+			req->src = rctx->sg;
+			sg_mark_end(&rctx->sg[0]);
+		} else {
+			sg_mark_end(sg_last);
+			memset(rctx->sg, 0, sizeof(rctx->sg));
+			sg_set_buf(&rctx->sg[0], staging,
+						rctx->trailing_buf_len);
+			sg_mark_end(&rctx->sg[1]);
+			sg_chain(rctx->sg, 2, req->src);
+			req->src = rctx->sg;
+		}
+	} else
+		sg_mark_end(sg_last);
+
+	req->nbytes = nbytes;
+	rctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+	uint8_t  *staging;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->last_blk = 1;
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	req->src = &rctx->sg[0];
+	req->nbytes = rctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+	rctx->first_blk = 1;
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+#if (KERNEL_VERSION(6, 2, 0) <= LINUX_VERSION_CODE)
+static void _crypto_sha_hmac_ahash_req_complete(void *data, int err)
+{
+	struct completion *ahash_req_complete = data;
+#else
+static void _crypto_sha_hmac_ahash_req_complete(
+		struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+#endif
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	uint8_t	*in_buf;
+	int ret = 0;
+	struct scatterlist sg = {0};
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+
+	ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (ahash_req == NULL)
+		return -ENOMEM;
+	init_completion(&ahash_req_complete);
+	ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&ahash_req_complete);
+	crypto_ahash_clear_flags(tfm, ~0);
+
+	in_buf = kzalloc(len + 64, GFP_KERNEL);
+	if (in_buf == NULL) {
+		ahash_request_free(ahash_req);
+		return -ENOMEM;
+	}
+	memcpy(in_buf, key, len);
+	sg_set_buf(&sg, in_buf, len);
+	sg_mark_end(&sg);
+
+	ahash_request_set_crypt(ahash_req, &sg,
+				&sha_ctx->authkey[0], len);
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		ret = _sha1_digest(ahash_req);
+	else
+		ret = _sha256_digest(ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+			wait_for_completion_interruptible(
+						&ahash_req_complete);
+		reinit_completion(&ahash_req_complete);
+	}
+
+	kfree_sensitive(in_buf);
+	ahash_request_free(ahash_req);
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+	if (len <= SHA1_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		ret = _sha_hmac_setkey(tfm, key, len);
+		if (ret)
+			pr_err("SHA1 hmac setkey failed\n");
+		sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+	}
+	return ret;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	int ret = 0;
+
+	memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+	if (len <= SHA256_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		ret = _sha_hmac_setkey(tfm, key, len);
+		if (ret)
+			pr_err("SHA256 hmac setkey failed\n");
+		sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+	}
+
+	return ret;
+}
+
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int i;
+
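+	/* Seed the inner hash with the HMAC ipad: authkey XORed with 0x36 */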
+	for (i = 0; i < sha_block_size; i++)
+		rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	rctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+	uint8_t  *staging;
+	uint8_t *p;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	p = staging;
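+	/* Outer HMAC hash input: (authkey XOR opad 0x5c) || inner digest */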
+	for (i = 0; i < sha_block_size; i++)
+		*p++ = sha_ctx->authkey[i] ^ 0x5c;
+	memcpy(p, &rctx->digest[0], sha_digest_size);
+	sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&rctx->sg[0]);
+
+	/* save the original req structure fields*/
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	req->src = &rctx->sg[0];
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(req);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	rctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
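+/*
+ * Software HMAC inner-hash finish (used when the engine has no native HMAC
+ * support): hash the buffered trailing data and wait for the result so the
+ * outer hash can then run over (authkey XOR opad) || inner digest.
+ */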
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint8_t  *staging;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+						rctx->trailing_buf_len);
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+		wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
+		reinit_completion(&sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+	ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+	if (ret)
+		return ret;
+
+	return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+}
+
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+	if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+		return -EINVAL;
+	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+	return 0;
+}
+
+
+int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
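+/*
+ * Illustrative (hypothetical) caller usage, not part of this driver:
+ *
+ *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	if (qcrypto_cipher_set_device(req, 0))
+ *		pr_warn("crypto engine 0 not available\n");
+ */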
+
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev,
+			u32 hw_inst)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
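+	/*
+	 * HW-backed keys need platform support, and a context may not have
+	 * every key-source flag in QCRYPTO_CTX_KEY_MASK set at once.
+	 */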
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(!cp->platform_support.hw_key_support)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(!cp->platform_support.hw_key_support)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(!cp->platform_support.hw_key_support)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct skcipher_request *req,
+							unsigned int flags)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+}
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+}
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init   = _sha1_init,
+		.update = _sha1_update,
+		.final  = _sha1_final,
+		.digest = _sha1_digest,
+		.export = _sha1_export,
+		.import = _sha1_import,
+		.halg   = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize  = sizeof(struct sha1_state),
+			.base       = {
+				.cra_name           = "sha1",
+				.cra_driver_name    = "qcrypto-sha1",
+				.cra_priority       = 300,
+				.cra_flags          = CRYPTO_ALG_ASYNC,
+				.cra_blocksize      = SHA1_BLOCK_SIZE,
+				.cra_ctxsize        = sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask      = 0,
+				.cra_module         = THIS_MODULE,
+				.cra_init           = _qcrypto_ahash_cra_init,
+				.cra_exit           = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init   = _sha256_init,
+		.update = _sha256_update,
+		.final  = _sha256_final,
+		.digest = _sha256_digest,
+		.export = _sha256_export,
+		.import = _sha256_import,
+		.halg   = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize  = sizeof(struct sha256_state),
+			.base       = {
+				.cra_name           = "sha256",
+				.cra_driver_name    = "qcrypto-sha256",
+				.cra_priority       = 300,
+				.cra_flags          = CRYPTO_ALG_ASYNC,
+				.cra_blocksize      = SHA256_BLOCK_SIZE,
+				.cra_ctxsize        = sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask      = 0,
+				.cra_module         = THIS_MODULE,
+				.cra_init           = _qcrypto_ahash_cra_init,
+				.cra_exit           = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init   = _sha1_hmac_init,
+		.update = _sha1_hmac_update,
+		.final  = _sha1_hmac_final,
+		.export = _sha1_hmac_export,
+		.import = _sha1_hmac_import,
+		.digest = _sha1_hmac_digest,
+		.setkey = _sha1_hmac_setkey,
+		.halg   = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.statesize  = sizeof(struct sha1_state),
+			.base       = {
+				.cra_name           = "hmac(sha1)",
+				.cra_driver_name    = "qcrypto-hmac-sha1",
+				.cra_priority       = 300,
+				.cra_flags          = CRYPTO_ALG_ASYNC,
+				.cra_blocksize      = SHA1_BLOCK_SIZE,
+				.cra_ctxsize        = sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask      = 0,
+				.cra_module         = THIS_MODULE,
+				.cra_init           = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit           = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init   = _sha256_hmac_init,
+		.update = _sha256_hmac_update,
+		.final  = _sha256_hmac_final,
+		.export = _sha256_hmac_export,
+		.import = _sha256_hmac_import,
+		.digest = _sha256_hmac_digest,
+		.setkey = _sha256_hmac_setkey,
+		.halg   = {
+			.digestsize = SHA256_DIGEST_SIZE,
+			.statesize  = sizeof(struct sha256_state),
+			.base       = {
+				.cra_name           = "hmac(sha256)",
+				.cra_driver_name    = "qcrypto-hmac-sha256",
+				.cra_priority       = 300,
+				.cra_flags          = CRYPTO_ALG_ASYNC,
+				.cra_blocksize      = SHA256_BLOCK_SIZE,
+				.cra_ctxsize        = sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask      = 0,
+				.cra_module         = THIS_MODULE,
+				.cra_init           = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit           = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct skcipher_alg _qcrypto_sk_cipher_algos[] = {
+	{
+		.setkey         = _qcrypto_setkey_aes,
+		.encrypt        = _qcrypto_enc_aes_ecb,
+		.decrypt        = _qcrypto_dec_aes_ecb,
+		.init           = _qcrypto_aes_skcipher_init,
+		.exit           = _qcrypto_aes_skcipher_exit,
+		.min_keysize    = AES_MIN_KEY_SIZE,
+		.max_keysize    = AES_MAX_KEY_SIZE,
+		.base           = {
+			.cra_name           = "ecb(aes)",
+			.cra_driver_name    = "qcrypto-ecb-aes",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = AES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_aes,
+		.encrypt        = _qcrypto_enc_aes_cbc,
+		.decrypt        = _qcrypto_dec_aes_cbc,
+		.init           = _qcrypto_aes_skcipher_init,
+		.exit           = _qcrypto_aes_skcipher_exit,
+		.min_keysize    = AES_MIN_KEY_SIZE,
+		.max_keysize    = AES_MAX_KEY_SIZE,
+		.ivsize         = AES_BLOCK_SIZE,
+		.base           = {
+			.cra_name           = "cbc(aes)",
+			.cra_driver_name    = "qcrypto-cbc-aes",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = AES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_aes,
+		.encrypt        = _qcrypto_enc_aes_ctr,
+		.decrypt        = _qcrypto_dec_aes_ctr,
+		.init           = _qcrypto_aes_skcipher_init,
+		.exit           = _qcrypto_aes_skcipher_exit,
+		.min_keysize    = AES_MIN_KEY_SIZE,
+		.max_keysize    = AES_MAX_KEY_SIZE,
+		.ivsize         = AES_BLOCK_SIZE,
+		.base           = {
+			.cra_name           = "ctr(aes)",
+			.cra_driver_name    = "qcrypto-ctr-aes",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = AES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_des,
+		.encrypt        = _qcrypto_enc_des_ecb,
+		.decrypt        = _qcrypto_dec_des_ecb,
+		.init           = _qcrypto_skcipher_init,
+		.exit           = _qcrypto_skcipher_exit,
+		.min_keysize    = DES_KEY_SIZE,
+		.max_keysize    = DES_KEY_SIZE,
+		.base           = {
+			.cra_name           = "ecb(des)",
+			.cra_driver_name    = "qcrypto-ecb-des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_des,
+		.encrypt        = _qcrypto_enc_des_cbc,
+		.decrypt        = _qcrypto_dec_des_cbc,
+		.init           = _qcrypto_skcipher_init,
+		.exit           = _qcrypto_skcipher_exit,
+		.min_keysize    = DES_KEY_SIZE,
+		.max_keysize    = DES_KEY_SIZE,
+		.ivsize         = DES_BLOCK_SIZE,
+		.base           = {
+			.cra_name           = "cbc(des)",
+			.cra_driver_name    = "qcrypto-cbc-des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_3des,
+		.encrypt        = _qcrypto_enc_3des_ecb,
+		.decrypt        = _qcrypto_dec_3des_ecb,
+		.init           = _qcrypto_skcipher_init,
+		.exit           = _qcrypto_skcipher_exit,
+		.min_keysize    = DES3_EDE_KEY_SIZE,
+		.max_keysize    = DES3_EDE_KEY_SIZE,
+		.base           = {
+			.cra_name           = "ecb(des3_ede)",
+			.cra_driver_name    = "qcrypto-ecb-3des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_setkey_3des,
+		.encrypt        = _qcrypto_enc_3des_cbc,
+		.decrypt        = _qcrypto_dec_3des_cbc,
+		.init           = _qcrypto_skcipher_init,
+		.exit           = _qcrypto_skcipher_exit,
+		.min_keysize    = DES3_EDE_KEY_SIZE,
+		.max_keysize    = DES3_EDE_KEY_SIZE,
+		.ivsize         = DES3_EDE_BLOCK_SIZE,
+		.base           = {
+			.cra_name           = "cbc(des3_ede)",
+			.cra_driver_name    = "qcrypto-cbc-3des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+};
+
+static struct skcipher_alg _qcrypto_sk_cipher_xts_algo = {
+	.setkey         = _qcrypto_setkey_aes_xts,
+	.encrypt        = _qcrypto_enc_aes_xts,
+	.decrypt        = _qcrypto_dec_aes_xts,
+	.init           = _qcrypto_skcipher_init,
+	.exit           = _qcrypto_skcipher_exit,
+	.min_keysize    = AES_MIN_KEY_SIZE,
+	.max_keysize    = AES_MAX_KEY_SIZE,
+	.ivsize         = AES_BLOCK_SIZE,
+	.base           = {
+		.cra_name           = "xts(aes)",
+		.cra_driver_name    = "qcrypto-xts-aes",
+		.cra_priority       = 300,
+		.cra_flags          = CRYPTO_ALG_ASYNC,
+		.cra_blocksize      = AES_BLOCK_SIZE,
+		.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask      = 0,
+		.cra_module         = THIS_MODULE,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_aes_cbc,
+		.init           = _qcrypto_cra_aead_aes_sha1_init,
+		.exit           = _qcrypto_cra_aead_aes_exit,
+		.ivsize         = AES_BLOCK_SIZE,
+		.maxauthsize    = SHA1_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha1-cbc-aes",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = AES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_des_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_des_cbc,
+		.init           = _qcrypto_cra_aead_sha1_init,
+		.exit           = _qcrypto_cra_aead_exit,
+		.ivsize         = DES_BLOCK_SIZE,
+		.maxauthsize    = SHA1_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha1),cbc(des))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha1-cbc-des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module	        = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_3des_cbc,
+		.init           = _qcrypto_cra_aead_sha1_init,
+		.exit           = _qcrypto_cra_aead_exit,
+		.ivsize         = DES3_EDE_BLOCK_SIZE,
+		.maxauthsize    = SHA1_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha1-cbc-3des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_aes_cbc,
+		.init           = _qcrypto_cra_aead_aes_sha256_init,
+		.exit           = _qcrypto_cra_aead_aes_exit,
+		.ivsize         = AES_BLOCK_SIZE,
+		.maxauthsize    = SHA256_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha256-cbc-aes",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = AES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_des_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_des_cbc,
+		.init           = _qcrypto_cra_aead_sha256_init,
+		.exit           = _qcrypto_cra_aead_exit,
+		.ivsize         = DES_BLOCK_SIZE,
+		.maxauthsize    = SHA256_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha256),cbc(des))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha256-cbc-des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+	{
+		.setkey         = _qcrypto_aead_setkey,
+		.setauthsize    = _qcrypto_aead_setauthsize,
+		.encrypt        = _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt        = _qcrypto_aead_decrypt_3des_cbc,
+		.init           = _qcrypto_cra_aead_sha256_init,
+		.exit           = _qcrypto_cra_aead_exit,
+		.ivsize         = DES3_EDE_BLOCK_SIZE,
+		.maxauthsize    = SHA256_DIGEST_SIZE,
+		.base           = {
+			.cra_name           = "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name    = "qcrypto-aead-hmac-sha256-cbc-3des",
+			.cra_priority       = 300,
+			.cra_flags          = CRYPTO_ALG_ASYNC,
+			.cra_blocksize      = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask      = 0,
+			.cra_module         = THIS_MODULE,
+		},
+	},
+};
+
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+	.setkey         = _qcrypto_aead_ccm_setkey,
+	.setauthsize    = _qcrypto_aead_ccm_setauthsize,
+	.encrypt        = _qcrypto_aead_encrypt_aes_ccm,
+	.decrypt        = _qcrypto_aead_decrypt_aes_ccm,
+	.init           = _qcrypto_cra_aead_ccm_init,
+	.exit           = _qcrypto_cra_aead_exit,
+	.ivsize         = AES_BLOCK_SIZE,
+	.maxauthsize    = AES_BLOCK_SIZE,
+	.base           = {
+		.cra_name           = "ccm(aes)",
+		.cra_driver_name    = "qcrypto-aes-ccm",
+		.cra_priority       = 300,
+		.cra_flags          = CRYPTO_ALG_ASYNC,
+		.cra_blocksize      = AES_BLOCK_SIZE,
+		.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask      = 0,
+		.cra_module         = THIS_MODULE,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+	.setkey         = _qcrypto_aead_rfc4309_ccm_setkey,
+	.setauthsize    = _qcrypto_aead_rfc4309_ccm_setauthsize,
+	.encrypt        = _qcrypto_aead_rfc4309_enc_aes_ccm,
+	.decrypt        = _qcrypto_aead_rfc4309_dec_aes_ccm,
+	.init           = _qcrypto_cra_aead_rfc4309_ccm_init,
+	.exit           = _qcrypto_cra_aead_exit,
+	.ivsize         = 8,
+	.maxauthsize    = 16,
+	.base           = {
+		.cra_name           = "rfc4309(ccm(aes))",
+		.cra_driver_name    = "qcrypto-rfc4309-aes-ccm",
+		.cra_priority       = 300,
+		.cra_flags          = CRYPTO_ALG_ASYNC,
+		.cra_blocksize      = 1,
+		.cra_ctxsize        = sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask      = 0,
+		.cra_module         = THIS_MODULE,
+	},
+};
+
+static int  _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp = &qcrypto_dev;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+	pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+	if (!pengine)
+		return -ENOMEM;
+
+	pengine->icc_path = of_icc_get(&pdev->dev, "data_path");
+	if (IS_ERR(pengine->icc_path)) {
+		dev_err(&pdev->dev, "failed to get icc path\n");
+		rc = PTR_ERR(pengine->icc_path);
+		goto exit_kzfree;
+	}
+	pengine->bw_state = BUS_NO_BANDWIDTH;
+
+	rc = icc_set_bw(pengine->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set high bandwidth\n");
+		goto exit_kzfree;
+	}
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		rc = -ENODEV;
+		goto exit_free_pdata;
+	}
+	rc = icc_set_bw(pengine->icc_path, 0, 0);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set low bandwidth\n");
+		goto exit_qce_close;
+	}
+
+	platform_set_drvdata(pdev, pengine);
+	pengine->qce = handle;
+	pengine->pcp = cp;
+	pengine->pdev = pdev;
+	pengine->signature = 0xdeadbeef;
+
+	timer_setup(&(pengine->bw_reaper_timer),
+			qcrypto_bw_reaper_timer_callback, 0);
+	INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+	INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+	pengine->high_bw_req = false;
+	pengine->active_seq = 0;
+	pengine->last_active_seq = 0;
+	pengine->check_flag = false;
+	pengine->max_req_used = 0;
+	pengine->issue_req = false;
+
+	crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+	mutex_lock(&cp->engine_lock);
+	cp->total_units++;
+	pengine->unit = cp->total_units;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->first_engine = list_empty(&cp->engine_list);
+	if (pengine->first_engine)
+		cp->first_engine = pengine;
+	list_add_tail(&pengine->elist, &cp->engine_list);
+	cp->next_engine = pengine;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qce_hw_support(pengine->qce, &cp->ce_support);
+	pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+	pengine->max_req = cp->ce_support.max_request;
+	pqcrypto_req_control = kcalloc(pengine->max_req,
+				sizeof(struct qcrypto_req_control),
+				GFP_KERNEL);
+	if (pqcrypto_req_control == NULL) {
+		rc = -ENOMEM;
+		goto exit_unlock_mutex;
+	}
+	qcrypto_init_req_control(pengine, pqcrypto_req_control);
+	if (cp->ce_support.bam)	 {
+		cp->platform_support.ce_shared = cp->ce_support.is_shared;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+		cp->platform_support.sha_hmac = 1;
+		pengine->ce_device = cp->ce_support.ce_device;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+
+	if (cp->total_units != 1)
+		goto exit_unlock_mutex;
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_sk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_sk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.base.cra_name,
+					strlen(q_alg->cipher_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_skcipher(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.base.cra_driver_name);
+			kfree_sensitive(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_sk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_xts_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.base.cra_name,
+					strlen(q_alg->cipher_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_skcipher(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.base.cra_driver_name);
+			kfree_sensitive(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.base.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_ahash_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+				q_alg->sha_alg.halg.base.cra_name,
+				strlen(q_alg->sha_alg.halg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->sha_alg.halg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kfree_sensitive(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+		|| cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	/* register crypto aead (hmac-sha256) algorithms the device supports */
+	if (cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha256_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_hmac_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->sha_alg.halg.base.cra_name,
+					strlen(
+					q_alg->sha_alg.halg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						 "The algorithm name %s is too long.\n",
+						 q_alg->sha_alg.halg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kfree_sensitive(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kfree_sensitive(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+
+		q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_rfc4309_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+	}
+	mutex_unlock(&cp->engine_lock);
+
+	return 0;
+err:
+	_qcrypto_remove_engine(pengine);
+	kfree_sensitive(pqcrypto_req_control);
+exit_unlock_mutex:
+	mutex_unlock(&cp->engine_lock);
+exit_qce_close:
+	if (pengine->qce)
+		qce_close(pengine->qce);
+exit_free_pdata:
+	icc_set_bw(pengine->icc_path, 0, 0);
+	platform_set_drvdata(pdev, NULL);
+exit_kzfree:
+	memset(pengine, 0, ksize((void *)pengine));
+	kfree(pengine);
+	return rc;
+}
+
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+					|| cp->req_queue.qlen)
+		return 1;
+	return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+	del_timer_sync(&pengine->bw_reaper_timer);
+	qcrypto_ce_set_bus(pengine, false);
+}
+
+static int  _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+
+	pengine = platform_get_drvdata(pdev);
+	if (!pengine)
+		return -EINVAL;
+
+	/*
+	 * Check if this platform supports clock management in suspend/resume
+	 * If not, just simply return 0.
+	 */
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	switch (pengine->bw_state) {
+	case BUS_NO_BANDWIDTH:
+		if (!pengine->high_bw_req)
+			pengine->bw_state = BUS_SUSPENDED;
+		else
+			ret = -EBUSY;
+		break;
+	case BUS_HAS_BANDWIDTH:
+		if (_qcrypto_engine_in_use(pengine)) {
+			ret = -EBUSY;
+		} else {
+			pengine->bw_state = BUS_SUSPENDING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+			_qcrypto_do_suspending(pengine);
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_SUSPENDED;
+		}
+		break;
+	case BUS_BANDWIDTH_RELEASING:
+	case BUS_BANDWIDTH_ALLOCATING:
+	case BUS_SUSPENDED:
+	case BUS_SUSPENDING:
+	default:
+		ret = -EBUSY;
+		break;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+	if (qce_pm_table.suspend) {
+		qcrypto_ce_set_bus(pengine, true);
+		qce_pm_table.suspend(pengine->qce);
+		qcrypto_ce_set_bus(pengine, false);
+	}
+	return 0;
+}
+
+static int  _qcrypto_resume(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+	int ret = 0;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return -EINVAL;
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->bw_state == BUS_SUSPENDED) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		if (qce_pm_table.resume) {
+			qcrypto_ce_set_bus(pengine, true);
+			qce_pm_table.resume(pengine->qce);
+			qcrypto_ce_set_bus(pengine, false);
+		}
+		spin_lock_irqsave(&cp->lock, flags);
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+		pengine->active_seq++;
+		pengine->check_flag = false;
+		if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+			if (!pengine->high_bw_req) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+		}
+	} else
+		ret = -EBUSY;
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return ret;
+}
+
+static const struct of_device_id qcrypto_match[] = {
+	{.compatible = "qcom,qcrypto",},
+	{}
+};
+
+static struct platform_driver __qcrypto = {
+	.probe      = _qcrypto_probe,
+	.remove     = _qcrypto_remove,
+	.suspend    = _qcrypto_suspend,
+	.resume     = _qcrypto_resume,
+	.driver     = {
+		.name           = "qcrypto",
+		.of_match_table = qcrypto_match,
+	},
+};
+
+static int _debug_qcrypto;
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		pe->total_req = 0;
+		pe->err_req = 0;
+		qce_clear_driver_stats(pe->qce);
+		pe->max_req_used = 0;
+	}
+	cp->max_qlen = 0;
+	cp->resp_start = 0;
+	cp->resp_stop = 0;
+	cp->no_avail = 0;
+	cp->max_resp_qlen = 0;
+	cp->queue_work_eng3 = 0;
+	cp->queue_work_not_eng3 = 0;
+	cp->queue_work_not_eng3_nz = 0;
+	cp->max_reorder_cnt = 0;
+	for (i = 0; i < MAX_SMP_CPU + 1; i++)
+		cp->cpu_req[i] = 0;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open   = simple_open,
+	.read   = _debug_stats_read,
+	.write  = _debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_debug("qcrypto debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcrypto = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto, &_debug_stats_ops);
+	if (IS_ERR(dent)) {
+		pr_debug("qcrypto debugfs_create_file fail, error %ld\n",
+				PTR_ERR(dent));
+		rc = PTR_ERR(dent);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	struct crypto_priv *pcp = &qcrypto_dev;
+
+	_qcrypto_debug_init();
+	INIT_LIST_HEAD(&pcp->alg_list);
+	INIT_LIST_HEAD(&pcp->engine_list);
+	init_llist_head(&pcp->ordered_resp_list);
+	spin_lock_init(&pcp->lock);
+	mutex_init(&pcp->engine_lock);
+	pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	if (!pcp->resp_wq) {
+		pr_err("Error allocating workqueue\n");
+		return -ENOMEM;
+	}
+	INIT_WORK(&pcp->resp_work, seq_response);
+	pcp->total_units = 0;
+	pcp->next_engine = NULL;
+	pcp->scheduled_eng = NULL;
+	pcp->ce_req_proc_sts = IN_PROGRESS;
+	crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+	return platform_driver_register(&__qcrypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	pr_debug("%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&__qcrypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Crypto driver");
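
Editor's note (not part of the commit): the tables above only describe the algorithms; once _qcrypto_probe() has registered them, kernel clients reach the engine through the standard crypto API by cra_name, and the cra_priority of 300 decides whether this driver wins over software implementations. A minimal lookup sketch follows; the function name is made up for the example.

/*
 * Illustrative only: allocate an AEAD transform by the cra_name registered
 * from _qcrypto_aead_ccm_algo and report which driver backs it.
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/printk.h>

static int qcrypto_ccm_lookup_example(void)
{
	struct crypto_aead *tfm;

	/* "ccm(aes)" is the cra_name registered by this driver */
	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	pr_info("ccm(aes) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));

	crypto_free_aead(tfm);
	return 0;
}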

+ 61 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcrypto.h

@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <crypto/aead.h>
+
+#define QCRYPTO_CTX_KEY_MASK		0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200
+
+
+int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);
+
+int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);
+
+int qcrypto_cipher_clear_flag(struct skcipher_request *req,
+							unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);
+
+struct crypto_engine_entry {
+	u32 hw_instance;
+	u32 ce_device;
+	int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
+				unsigned int fde_pfe,
+				unsigned int hw_inst);
+
+
+struct qcrypto_func_set {
+	int (*cipher_set)(struct skcipher_request *req,
+			unsigned int fde_pfe,
+			unsigned int hw_inst);
+	int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
+	int (*get_num_engines)(void);
+	void (*get_engine_list)(size_t num_engines,
+				struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
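
Editor's note: beyond the standard crypto API, this header exports per-request helpers so a client can steer a request to a specific engine or key source. A hedged sketch, assuming the caller already owns a qcrypto-backed skcipher transform and request (allocation and setkey not shown); the function name is illustrative.

/*
 * Hedged sketch: QCRYPTO_CTX_USE_PIPE_KEY presumably tells the engine to use
 * the key already programmed into its pipe rather than one set via setkey.
 */
#include <crypto/skcipher.h>
#include "qcrypto.h"

static int qcrypto_pipe_key_example(struct skcipher_request *req)
{
	int ret;

	ret = qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_PIPE_KEY);
	if (ret)
		return ret;

	/* submit the request as usual */
	return crypto_skcipher_encrypt(req);
}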

+ 529 - 0
qcom/opensource/securemsm-kernel/crypto-qti/qcryptohw_50.h

@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG		0x0007C
+#define CRYPTO_BAM_CD_ENABLE			27
+#define CRYPTO_BAM_CD_ENABLE_MASK		(1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_STATUS3_REG			0x1A11C
+#define CRYPTO_STATUS4_REG			0x1A124
+#define CRYPTO_STATUS5_REG			0x1A128
+#define CRYPTO_STATUS6_REG			0x1A13C
+
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+#define CRYPTO_DATA_PATT_PROC_CFG_REG		0x1A500
+#define CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG	0x1A504
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG		0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG		0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG		0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG		0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG		0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG		0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG		0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG		0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0			0x1A23C
+#define CRYPTO_CNTR_MASK_REG1			0x1A238
+#define CRYPTO_CNTR_MASK_REG2			0x1A234
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_PWR_CTRL				0x1A408
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR				20
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_LITTLE_ENDIAN_MASK		(1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				14 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3  /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+#define CRYPTO_AUTH_ALG_ZUC	5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17/* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		4
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+#define CRYPTO_ENCR_ALG_ZUC		6
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+/*  F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				1
+#define CRYPTO_ENCR_SNOW3G_SEL			2
+#define CRYPTO_ENCR_KASUMI_SEL			3
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_AUTH_AES_SEL			6
+#define CRYPTO_AUTH_SNOW3G_SEL			7
+#define CRYPTO_AUTH_KASUMI_SEL			8
+#define CRYPTO_BAM_PIPE_SETS			9	/* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS			13	/* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS			19	/* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL			26
+#define CRYPTO_AUTH_ZUC_SEL			27
+#define CRYPTO_ZUC_ENABLE			28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
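
Editor's note: each field macro above is a bit position and the matching *_ALG/*_MODE/*_KEY_SZ values are field encodings, so register words are composed by shifting each encoding into its field. The sketch below is illustrative only and not lifted from qce50.c; it builds an encryption segment configuration for AES-256 in CBC mode.

/* Illustrative composition of a CRYPTO_ENCR_SEG_CFG_REG value. */
#include <linux/types.h>
#include "qcryptohw_50.h"

static inline u32 encr_seg_cfg_aes256_cbc_example(void)
{
	u32 cfg = 0;

	cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;           /* bits 2-0 */
	cfg |= CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ;  /* bits 5-3 */
	cfg |= CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE;         /* bits 9-6 */

	return cfg;
}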

+ 123 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp1.h

@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#pragma once
+// AUTOGENERATED FILE: DO NOT EDIT
+
+#include <linux/types.h>
+#include "smcinvoke_object.h"
+
+#define HDCP1_PROVISION 0
+#define HDCP1_VERIFY 1
+#define HDCP1_SET_ENCRYPTION 2
+#define HDCP1_SET_ENCRYPTION_V2 3
+#define HDCP1_SET_KEY 4
+#define HDCP1_SET_KEY_V2 5
+#define HDCP1_SET_MODE 6
+
+static inline int32_t hdcp1_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t hdcp1_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t hdcp1_provision(struct Object self, uint32_t keyFormat_val,
+		   const void *key_ptr, size_t key_len,
+		   const void *dps_ptr, size_t dps_len)
+{
+	union ObjectArg a[3] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&keyFormat_val, sizeof(uint32_t)};
+	a[1].bi = (struct ObjectBufIn) {key_ptr, key_len * 1};
+	a[2].bi = (struct ObjectBufIn) {dps_ptr, dps_len * 1};
+
+	return Object_invoke(self, HDCP1_PROVISION, a,
+		 ObjectCounts_pack(3, 0, 0, 0));
+}
+
+static inline int32_t hdcp1_verify(struct Object self, uint32_t deviceType_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&deviceType_val, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP1_VERIFY, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp1_set_encryption(struct Object self, uint32_t enable_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&enable_val, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP1_SET_ENCRYPTION, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp1_set_encryption_v2(struct Object self, uint32_t enable_val,
+		   uint32_t deviceType_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	struct {
+		uint32_t m_enable;
+		uint32_t m_deviceType;
+	} i;
+
+	a[0].b = (struct ObjectBuf) {&i, 8};
+	i.m_enable = enable_val;
+	i.m_deviceType = deviceType_val;
+
+	return Object_invoke(self, HDCP1_SET_ENCRYPTION_V2, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp1_set_key(struct Object self, void *ksv_ptr, size_t ksv_len,
+		 size_t *ksv_lenout)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	int32_t result = 0;
+
+	a[0].b = (struct ObjectBuf) {ksv_ptr, ksv_len * 1};
+
+	result = Object_invoke(self, HDCP1_SET_KEY, a,
+		   ObjectCounts_pack(0, 1, 0, 0));
+
+	*ksv_lenout = a[0].b.size / 1;
+
+	return result;
+}
+
+static inline int32_t hdcp1_set_key_v2(struct Object self, void *ksv_ptr,
+		size_t ksv_len, size_t *ksv_lenout,
+		uint32_t deviceType_val)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+	int32_t result = 0;
+
+	a[1].b = (struct ObjectBuf) {ksv_ptr, ksv_len * 1};
+	a[0].b = (struct ObjectBuf) {&deviceType_val, sizeof(uint32_t)};
+
+	result = Object_invoke(self, HDCP1_SET_KEY_V2, a,
+		   ObjectCounts_pack(1, 1, 0, 0));
+
+	*ksv_lenout = a[1].b.size / 1;
+
+	return result;
+}
+
+static inline int32_t hdcp1_set_mode(struct Object self, int32_t mode_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&mode_val, sizeof(int32_t)};
+
+	return Object_invoke(self, HDCP1_SET_MODE, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
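
Editor's note: every wrapper in this autogenerated header follows the same marshalling pattern: scalars and buffers are packed into ObjectArg slots, the slot counts are encoded with ObjectCounts_pack(), and Object_invoke() carries the operation ID into the HDCP 1.x trusted application. A hedged caller sketch; the object handle is assumed to come from the TA-loading path, which is not shown here, and the function name is made up for the example.

#include <linux/printk.h>
#include "hdcp1.h"

/* Hedged sketch: hdcp1_app must already reference the loaded HDCP 1.x TA. */
static int hdcp1_verify_example(struct Object hdcp1_app, uint32_t device_type)
{
	int32_t rc = hdcp1_verify(hdcp1_app, device_type);

	if (rc)
		pr_err("hdcp1_verify failed, rc=%d\n", rc);

	return rc;
}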

+ 27 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp1_ops.h

@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#pragma once
+// AUTOGENERATED FILE: DO NOT EDIT
+
+#include <linux/types.h>
+#include "smcinvoke_object.h"
+
+#define IHDCP1OPS_NOTIFY_TOPOLOGY_CHANGE 0
+
+static inline int32_t hdcp1_ops_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t hdcp1_ops_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t hdcp1_ops_notify_topology_change(struct Object self)
+{
+	return Object_invoke(self, IHDCP1OPS_NOTIFY_TOPOLOGY_CHANGE, 0, 0);
+}

+ 304 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp2p2.h

@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#pragma once
+// AUTOGENERATED FILE: DO NOT EDIT
+
+#include <linux/types.h>
+#include "smcinvoke_object.h"
+
+#define HDCP2P2_PROVISION_KEY 0
+#define HDCP2P2_VERIFY_KEY 1
+#define HDCP2P2_TX_INIT 2
+#define HDCP2P2_TX_DEINIT 3
+#define HDCP2P2_RCVD_MSG 4
+#define HDCP2P2_SEND_TIMEOUT 5
+#define HDCP2P2_SET_HW_KEY 6
+#define HDCP2P2_QUERY_STREAM_TYPE 7
+#define HDCP2P2_INIT 8
+#define HDCP2P2_DEINIT 9
+#define HDCP2P2_VERSION 10
+#define HDCP2P2_SESSION_INIT 11
+#define HDCP2P2_SESSION_DEINIT 12
+#define HDCP2P2_START_AUTH 13
+#define HDCP2P2_SESSION_OPEN_STREAM 14
+#define HDCP2P2_SESSION_CLOSE_STREAM 15
+#define HDCP2P2_FORCE_ENCRYPTION 16
+#define HDCP2P2_DELETE_PAIRING_INFO 17
+
+static inline int32_t hdcp2p2_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t hdcp2p2_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t hdcp2p2_provision_key(struct Object self, const void *key_ptr,
+		 size_t key_len,
+		 const void *dps_ptr,
+		 size_t dps_len)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+
+	a[0].bi = (struct ObjectBufIn) {key_ptr, key_len * 1};
+	a[1].bi = (struct ObjectBufIn) {dps_ptr, dps_len * 1};
+
+	return Object_invoke(self, HDCP2P2_PROVISION_KEY, a,
+		 ObjectCounts_pack(2, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_verify_key(struct Object self)
+{
+	return Object_invoke(self, HDCP2P2_VERIFY_KEY, 0, 0);
+}
+
+static inline int32_t hdcp2p2_tx_init(struct Object self, uint32_t sessionid_val,
+		   uint32_t *ctxhandle_ptr)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&sessionid_val, sizeof(uint32_t)};
+	a[1].b = (struct ObjectBuf) {ctxhandle_ptr, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_TX_INIT, a,
+						 ObjectCounts_pack(1, 1, 0, 0));
+}
+
+static inline int32_t hdcp2p2_tx_deinit(struct Object self, uint32_t ctxhandle_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_TX_DEINIT, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_rcvd_msg(
+	struct Object self, const void *reqMsg_ptr, size_t reqMsg_len,
+	uint32_t ctxhandle_val, void *resMsg_ptr, size_t resMsg_len,
+	size_t *resMsg_lenout, uint32_t *timeout_ptr, uint32_t *flag_ptr, uint32_t *state_ptr)
+{
+	union ObjectArg a[4] = {{{0, 0}}};
+	int32_t result = 0;
+	struct {
+		uint32_t m_timeout;
+		uint32_t m_flag;
+		uint32_t m_state;
+	} o = {0, 0, 0};
+	a[2].b = (struct ObjectBuf) {&o, 12};
+	a[0].bi = (struct ObjectBufIn) {reqMsg_ptr, reqMsg_len * 1};
+	a[1].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+	a[3].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
+
+	result = Object_invoke(self, HDCP2P2_RCVD_MSG, a,
+			ObjectCounts_pack(2, 2, 0, 0));
+
+	*resMsg_lenout = a[3].b.size / 1;
+	*timeout_ptr = o.m_timeout;
+	*flag_ptr = o.m_flag;
+	*state_ptr = o.m_state;
+
+	return result;
+}
+
+static inline int32_t hdcp2p2_send_timeout(struct Object self, uint32_t ctxhandle_val,
+		void *resMsg_ptr, size_t resMsg_len,
+		size_t *resMsg_lenout,
+		uint32_t *timeout_ptr)
+{
+	union ObjectArg a[3] = {{{0, 0}}};
+	int32_t result = 0;
+
+	a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+	a[1].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
+	a[2].b = (struct ObjectBuf) {timeout_ptr, sizeof(uint32_t)};
+
+	result = Object_invoke(self, HDCP2P2_SEND_TIMEOUT, a,
+		   ObjectCounts_pack(1, 2, 0, 0));
+
+	*resMsg_lenout = a[1].b.size / 1;
+
+	return result;
+}
+
+static inline int32_t hdcp2p2_set_hw_key(struct Object self, uint32_t ctxhandle_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_SET_HW_KEY, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_query_stream_type(
+	struct Object self, uint32_t ctxhandle_val, void *resMsg_ptr, size_t resMsg_len,
+	size_t *resMsg_lenout, uint32_t *timeout_ptr)
+{
+	union ObjectArg a[3] = {{{0, 0}}};
+	int32_t result = 0;
+
+	a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+	a[1].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
+	a[2].b = (struct ObjectBuf) {timeout_ptr, sizeof(uint32_t)};
+
+	result = Object_invoke(self, HDCP2P2_QUERY_STREAM_TYPE, a,
+		   ObjectCounts_pack(1, 2, 0, 0));
+
+	*resMsg_lenout = a[1].b.size / 1;
+
+	return result;
+}
+
+static inline int32_t hdcp2p2_init(struct Object self, uint32_t clientVersion_val,
+		uint32_t *appversion_ptr)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&clientVersion_val, sizeof(uint32_t)};
+	a[1].b = (struct ObjectBuf) {appversion_ptr, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_INIT, a,
+						 ObjectCounts_pack(1, 1, 0, 0));
+}
+
+static inline int32_t hdcp2p2_deinit(struct Object self)
+{
+	return Object_invoke(self, HDCP2P2_DEINIT, 0, 0);
+}
+
+static inline int32_t hdcp2p2_version(struct Object self, uint32_t *appversion_ptr)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {appversion_ptr, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_VERSION, a,
+		 ObjectCounts_pack(0, 1, 0, 0));
+}
+
+static inline int32_t hdcp2p2_session_init(struct Object self, uint32_t deviceId_val,
+		uint32_t *sessionId_ptr)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&deviceId_val, sizeof(uint32_t)};
+	a[1].b = (struct ObjectBuf) {sessionId_ptr, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_SESSION_INIT, a,
+		 ObjectCounts_pack(1, 1, 0, 0));
+}
+
+static inline int32_t hdcp2p2_session_deinit(struct Object self,
+		  uint32_t sessionId_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) {&sessionId_val, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_SESSION_DEINIT, a,
+		ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_start_auth(struct Object self, uint32_t ctxhandle_val,
+		  void *resMsg_ptr, size_t resMsg_len,
+		  size_t *resMsg_lenout,
+		  uint32_t *timeout_ptr,
+		  uint32_t *flag_ptr,
+		  uint32_t *tzctxhandle_ptr)
+{
+	union ObjectArg a[3] = {{{0, 0}}};
+	int32_t result = 0;
+	struct {
+		uint32_t m_timeout;
+		uint32_t m_flag;
+		uint32_t m_tzctxhandle;
+	} o = {0, 0, 0};
+
+	a[1].b = (struct ObjectBuf) {&o, 12};
+	a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
+	a[2].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
+
+	result = Object_invoke(self, HDCP2P2_START_AUTH, a,
+		   ObjectCounts_pack(1, 2, 0, 0));
+
+	*resMsg_lenout = a[2].b.size / 1;
+	*timeout_ptr = o.m_timeout;
+	*flag_ptr = o.m_flag;
+	*tzctxhandle_ptr = o.m_tzctxhandle;
+
+	return result;
+}
+
+static inline int32_t hdcp2p2_session_open_stream(struct Object self,
+		   uint32_t sessionid_val,
+		   uint32_t vcpayloadid_val,
+		   uint32_t stream_number_val,
+		   uint32_t streamMediaType_val,
+		   uint32_t *resStreamId_ptr)
+{
+	union ObjectArg a[2] = {{{0, 0}}};
+	struct {
+		uint32_t m_sessionid;
+		uint32_t m_vcpayloadid;
+		uint32_t m_stream_number;
+		uint32_t m_streamMediaType;
+	} i = {0, 0, 0, 0};
+
+	a[0].b = (struct ObjectBuf) {&i, 16};
+	i.m_sessionid = sessionid_val;
+	i.m_vcpayloadid = vcpayloadid_val;
+	i.m_stream_number = stream_number_val;
+	i.m_streamMediaType = streamMediaType_val;
+	a[1].b = (struct ObjectBuf) {resStreamId_ptr, sizeof(uint32_t)};
+
+	return Object_invoke(self, HDCP2P2_SESSION_OPEN_STREAM, a,
+		 ObjectCounts_pack(1, 1, 0, 0));
+}
+
+static inline int32_t hdcp2p2_session_close_stream(struct Object self,
+		uint32_t sessionid_val,
+		uint32_t streamId_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	struct {
+		uint32_t m_sessionid;
+		uint32_t m_streamId;
+	} i = {0, 0};
+
+	a[0].b = (struct ObjectBuf) {&i, 8};
+	i.m_sessionid = sessionid_val;
+	i.m_streamId = streamId_val;
+
+	return Object_invoke(self, HDCP2P2_SESSION_CLOSE_STREAM, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_force_encryption(struct Object self,
+		uint32_t ctxhandle_val,
+		uint32_t enable_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	struct {
+		uint32_t m_ctxhandle;
+		uint32_t m_enable;
+	} i = {0, 0};
+
+	a[0].b = (struct ObjectBuf) {&i, 8};
+	i.m_ctxhandle = ctxhandle_val;
+	i.m_enable = enable_val;
+
+	return Object_invoke(self, HDCP2P2_FORCE_ENCRYPTION, a,
+		 ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t hdcp2p2_delete_pairing_info(struct Object self)
+{
+	return Object_invoke(self, HDCP2P2_DELETE_PAIRING_INFO, 0, 0);
+}
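
Editor's note: the operation IDs above map onto a session lifecycle. The ordering below is a sketch inferred from the operation set and the way session and context handles flow between calls; the authoritative sequencing lives in hdcp_smcinvoke.c, not in this header.

/*
 * Assumed HDCP 2.2 call ordering (inferred, not normative):
 *
 *   hdcp2p2_init()                  negotiate the app version
 *   hdcp2p2_session_init()          returns sessionId
 *   hdcp2p2_tx_init(sessionId)      returns ctxhandle for this transmitter
 *   hdcp2p2_start_auth(ctxhandle), then
 *   hdcp2p2_rcvd_msg()/hdcp2p2_send_timeout()   AKE/LC/SKE message exchange
 *   hdcp2p2_set_hw_key(ctxhandle)   program the derived key into hardware
 *   hdcp2p2_session_open_stream()/..._close_stream() as streams come and go
 *   hdcp2p2_tx_deinit(), hdcp2p2_session_deinit(), hdcp2p2_deinit()
 */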

+ 338 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_main.c

@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hdcp_main.h"
+#include "hdcp_qseecom.h"
+#include "hdcp_smcinvoke.h"
+
+struct hdcp_ta_interface ta_interface;
+static DEFINE_MUTEX(hdcp1_mutex_g);
+static DEFINE_MUTEX(hdcp2_mutex_g);
+
+void select_interface(bool use_smcinvoke)
+{
+	if (use_smcinvoke) {
+		ta_interface.trusted_app_hdcp1_init = &hdcp1_init_smcinvoke;
+		ta_interface.trusted_app_hdcp1_feature_supported = &hdcp1_feature_supported_smcinvoke;
+		ta_interface.trusted_app_hdcp1_set_enc = &hdcp1_set_enc_smcinvoke;
+		ta_interface.trusted_app_hdcp1_ops_notify = &hdcp1_ops_notify_smcinvoke;
+		ta_interface.trusted_app_hdcp1_start = &hdcp1_start_smcinvoke;
+		ta_interface.trusted_app_hdcp1_stop = &hdcp1_stop_smcinvoke;
+		ta_interface.trusted_app_hdcp2_init = &hdcp2_init_smcinvoke;
+		ta_interface.trusted_app_hdcp2_deinit = &hdcp2_deinit_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_start = &hdcp2_app_start_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_start_auth = &hdcp2_app_start_auth_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_process_msg = &hdcp2_app_process_msg_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_enable_encryption = &hdcp2_app_enable_encryption_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_timeout = &hdcp2_app_timeout_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_query_stream = &hdcp2_app_query_stream_smcinvoke;
+		ta_interface.trusted_app_hdcp2_app_stop = &hdcp2_app_stop_smcinvoke;
+		ta_interface.trusted_app_hdcp2_feature_supported = &hdcp2_feature_supported_smcinvoke;
+		ta_interface.trusted_app_hdcp2_force_encryption = &hdcp2_force_encryption_smcinvoke;
+		ta_interface.trusted_app_hdcp2_open_stream = &hdcp2_open_stream_smcinvoke;
+		ta_interface.trusted_app_hdcp2_close_stream = &hdcp2_close_stream_smcinvoke;
+		ta_interface.trusted_app_hdcp2_update_app_data = &hdcp2_update_app_data_smcinvoke;
+	} else {
+		ta_interface.trusted_app_hdcp1_init = &hdcp1_init_qseecom;
+		ta_interface.trusted_app_hdcp1_feature_supported = &hdcp1_feature_supported_qseecom;
+		ta_interface.trusted_app_hdcp1_set_enc = &hdcp1_set_enc_qseecom;
+		ta_interface.trusted_app_hdcp1_ops_notify = &hdcp1_ops_notify_qseecom;
+		ta_interface.trusted_app_hdcp1_start = &hdcp1_start_qseecom;
+		ta_interface.trusted_app_hdcp1_stop = &hdcp1_stop_qseecom;
+		ta_interface.trusted_app_hdcp2_init = &hdcp2_init_qseecom;
+		ta_interface.trusted_app_hdcp2_deinit = &hdcp2_deinit_qseecom;
+		ta_interface.trusted_app_hdcp2_app_start = &hdcp2_app_start_qseecom;
+		ta_interface.trusted_app_hdcp2_app_start_auth = &hdcp2_app_start_auth_qseecom;
+		ta_interface.trusted_app_hdcp2_app_process_msg = &hdcp2_app_process_msg_qseecom;
+		ta_interface.trusted_app_hdcp2_app_timeout = &hdcp2_app_timeout_qseecom;
+		ta_interface.trusted_app_hdcp2_app_enable_encryption = &hdcp2_app_enable_encryption_qseecom;
+		ta_interface.trusted_app_hdcp2_app_query_stream = &hdcp2_app_query_stream_qseecom;
+		ta_interface.trusted_app_hdcp2_app_stop = &hdcp2_app_stop_qseecom;
+		ta_interface.trusted_app_hdcp2_feature_supported = &hdcp2_feature_supported_qseecom;
+		ta_interface.trusted_app_hdcp2_force_encryption = &hdcp2_force_encryption_qseecom;
+		ta_interface.trusted_app_hdcp2_open_stream = &hdcp2_open_stream_qseecom;
+		ta_interface.trusted_app_hdcp2_close_stream = &hdcp2_close_stream_qseecom;
+		ta_interface.trusted_app_hdcp2_update_app_data = &hdcp2_update_app_data_qseecom;
+	}
+}
+
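+/* Count the number of set bits across the given byte array. */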
+int hdcp1_count_ones(u8 *array, u8 len)
+{
+	int i, j, count = 0;
+
+	for (i = 0; i < len; i++)
+		for (j = 0; j < 8; j++)
+			count += (((array[i] >> j) & 0x1) ? 1 : 0);
+
+	return count;
+}
+
+int hdcp1_validate_aksv(u32 aksv_msb, u32 aksv_lsb)
+{
+	int const number_of_ones = 20;
+	u8 aksv[5] = {0};
+
+	pr_debug("AKSV=%02x%08x\n", aksv_msb, aksv_lsb);
+
+	aksv[0] = aksv_lsb & 0xFF;
+	aksv[1] = (aksv_lsb >> 8) & 0xFF;
+	aksv[2] = (aksv_lsb >> 16) & 0xFF;
+	aksv[3] = (aksv_lsb >> 24) & 0xFF;
+	aksv[4] = aksv_msb & 0xFF;
+
+	/* check there are 20 ones in AKSV */
+	if (hdcp1_count_ones(aksv, 5) != number_of_ones) {
+		pr_err("AKSV bit count failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+bool hdcp2_feature_supported(void *data)
+{
+	bool supported = false;
+
+	mutex_lock(&hdcp2_mutex_g);
+	supported = ta_interface.trusted_app_hdcp2_feature_supported(data);
+	mutex_unlock(&hdcp2_mutex_g);
+
+	return supported;
+}
+EXPORT_SYMBOL_GPL(hdcp2_feature_supported);
+
+int hdcp2_force_encryption(void *ctx, uint32_t enable)
+{
+	int ret = 0;
+
+	mutex_lock(&hdcp2_mutex_g);
+	ret = ta_interface.trusted_app_hdcp2_force_encryption(ctx, enable);
+	mutex_unlock(&hdcp2_mutex_g);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp2_force_encryption);
+
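+/*
+ * Dispatch an HDCP 2.x command to the selected TA backend and, on success,
+ * refresh the caller's app_data from the backend's cached buffers.
+ */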
+int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd,
+				   struct hdcp2_app_data *app_data)
+{
+	int ret = 0;
+	uint32_t req_len = 0;
+
+	if (!ctx || !app_data) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	req_len = app_data->request.length;
+
+	mutex_lock(&hdcp2_mutex_g);
+	switch (cmd) {
+	case HDCP2_CMD_START:
+		ret = ta_interface.trusted_app_hdcp2_app_start(ctx, req_len);
+		break;
+	case HDCP2_CMD_START_AUTH:
+		ret = ta_interface.trusted_app_hdcp2_app_start_auth(ctx, req_len);
+		break;
+	case HDCP2_CMD_PROCESS_MSG:
+		ret = ta_interface.trusted_app_hdcp2_app_process_msg(ctx, req_len);
+		break;
+	case HDCP2_CMD_TIMEOUT:
+		ret = ta_interface.trusted_app_hdcp2_app_timeout(ctx, req_len);
+		break;
+	case HDCP2_CMD_EN_ENCRYPTION:
+		ret = ta_interface.trusted_app_hdcp2_app_enable_encryption(ctx, req_len);
+		break;
+	case HDCP2_CMD_QUERY_STREAM:
+		ret = ta_interface.trusted_app_hdcp2_app_query_stream(ctx, req_len);
+		break;
+	case HDCP2_CMD_STOP:
+		ret = ta_interface.trusted_app_hdcp2_app_stop(ctx);
+		break;
+	default:
+		goto error;
+	}
+
+	if (ret)
+		goto error;
+
+	ret = ta_interface.trusted_app_hdcp2_update_app_data(ctx, app_data);
+
+error:
+	mutex_unlock(&hdcp2_mutex_g);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp2_app_comm);
+
+int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, uint8_t stream_number,
+		  uint32_t *stream_id)
+{
+	int ret = 0;
+
+	mutex_lock(&hdcp2_mutex_g);
+	ret = ta_interface.trusted_app_hdcp2_open_stream(ctx, vc_payload_id, stream_number,
+		   stream_id);
+	mutex_unlock(&hdcp2_mutex_g);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp2_open_stream);
+
+int hdcp2_close_stream(void *ctx, uint32_t stream_id)
+{
+	int ret = 0;
+
+	mutex_lock(&hdcp2_mutex_g);
+	ret = ta_interface.trusted_app_hdcp2_close_stream(ctx, stream_id);
+	mutex_unlock(&hdcp2_mutex_g);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp2_close_stream);
+
+void *hdcp2_init(u32 device_type)
+{
+	void *data = NULL;
+
+	mutex_lock(&hdcp2_mutex_g);
+	data = ta_interface.trusted_app_hdcp2_init(device_type);
+	mutex_unlock(&hdcp2_mutex_g);
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(hdcp2_init);
+
+void hdcp2_set_hdcp_key_verify_retries(void *ctx, u32 max_hdcp_key_verify_retries)
+{
+	struct hdcp2_qsee_handle *handle = ctx;
+
+	handle->max_hdcp_key_verify_retries = max_hdcp_key_verify_retries;
+
+	pr_debug("hdcp2 max_hdcp_key_verify_retries %d\n", handle->max_hdcp_key_verify_retries);
+}
+EXPORT_SYMBOL_GPL(hdcp2_set_hdcp_key_verify_retries);
+
+void hdcp2_deinit(void *ctx)
+{
+	ta_interface.trusted_app_hdcp2_deinit(ctx);
+}
+EXPORT_SYMBOL_GPL(hdcp2_deinit);
+
+void *hdcp1_init(void)
+{
+	void *data = NULL;
+
+	mutex_lock(&hdcp1_mutex_g);
+	data = ta_interface.trusted_app_hdcp1_init();
+	mutex_unlock(&hdcp1_mutex_g);
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(hdcp1_init);
+
+void hdcp1_set_hdcp_key_verify_retries(void *ctx, u32 max_hdcp_key_verify_retries)
+{
+	struct hdcp1_qsee_handle *handle = ctx;
+
+	handle->max_hdcp_key_verify_retries = max_hdcp_key_verify_retries;
+
+	pr_debug("hdcp1 max_hdcp_key_verify_retries %d\n", handle->max_hdcp_key_verify_retries);
+}
+EXPORT_SYMBOL_GPL(hdcp1_set_hdcp_key_verify_retries);
+
+void hdcp1_deinit(void *data)
+{
+	kfree(data);
+}
+EXPORT_SYMBOL_GPL(hdcp1_deinit);
+
+bool hdcp1_feature_supported(void *data)
+{
+	bool supported = false;
+
+	mutex_lock(&hdcp1_mutex_g);
+	supported = ta_interface.trusted_app_hdcp1_feature_supported(data);
+	mutex_unlock(&hdcp1_mutex_g);
+
+	return supported;
+}
+EXPORT_SYMBOL_GPL(hdcp1_feature_supported);
+
+int hdcp1_set_enc(void *data, bool enable)
+{
+	int ret = 0;
+
+	mutex_lock(&hdcp1_mutex_g);
+	ret = ta_interface.trusted_app_hdcp1_set_enc(data, enable);
+	mutex_unlock(&hdcp1_mutex_g);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp1_set_enc);
+
+int hdcp1_ops_notify(void *data, void *topo, bool is_authenticated)
+{
+	int ret = 0;
+
+	ret = ta_interface.trusted_app_hdcp1_ops_notify(data, topo, is_authenticated);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp1_ops_notify);
+
+int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb)
+{
+	int ret = 0;
+
+	mutex_lock(&hdcp1_mutex_g);
+	ret = ta_interface.trusted_app_hdcp1_start(data, aksv_msb, aksv_lsb);
+	mutex_unlock(&hdcp1_mutex_g);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hdcp1_start);
+
+void hdcp1_stop(void *data)
+{
+	mutex_lock(&hdcp1_mutex_g);
+	ta_interface.trusted_app_hdcp1_stop(data);
+	mutex_unlock(&hdcp1_mutex_g);
+}
+EXPORT_SYMBOL_GPL(hdcp1_stop);
+
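+/*
+ * Pick the TA transport at module load: use smcinvoke when the "qcom,hdcp"
+ * DT node sets "qcom,use-smcinvoke", otherwise fall back to qseecom.
+ */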
+static int __init hdcp_module_init(void)
+{
+	struct device_node *np = NULL;
+	bool use_smcinvoke = false;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,hdcp");
+	if (!np) {
+		/*
+		 * Select the qseecom interface as the default if the hdcp
+		 * node is not present in the dtsi.
+		 */
+		select_interface(use_smcinvoke);
+		return 0;
+	}
+
+	use_smcinvoke = of_property_read_bool(np, "qcom,use-smcinvoke");
+
+	select_interface(use_smcinvoke);
+
+	return 0;
+}
+
+static void __exit hdcp_module_exit(void)
+{
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HDCP driver");
+
+module_init(hdcp_module_init);
+module_exit(hdcp_module_exit);

+ 113 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_main.h

@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HDCP_MAIN_H__
+#define __HDCP_MAIN_H__
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <misc/qseecom_kernel.h>
+
+#define HDCP2P2_APP_NAME "hdcp2p2"
+#define HDCP1_APP_NAME "hdcp1"
+#define HDCP1OPS_APP_NAME "ops"
+#define HDCPSRM_APP_NAME "hdcpsrm"
+#define QSEECOM_SBUFF_SIZE 0x1000
+
+#define MAX_REC_ID_LIST_SIZE 160
+#define MAX_TX_MESSAGE_SIZE 129
+#define MAX_RX_MESSAGE_SIZE 534
+#define MAX_TOPOLOGY_ELEMS 32
+#define HDCP1_NOTIFY_TOPOLOGY 1
+#define HDCP1_AKSV_SIZE 8
+
+#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \
+	((((maj)&0xFF) << 16) | (((min)&0xFF) << 8) | ((patch)&0xFF))
+
+#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
+#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
+#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v)&0xFF)
+
+#define HDCP_CLIENT_MAJOR_VERSION 2
+#define HDCP_CLIENT_MINOR_VERSION 1
+#define HDCP_CLIENT_PATCH_VERSION 0
+
+#define HDCP_SUCCESS 0
+
+/* Wait 200ms after authentication */
+#define SLEEP_FORCE_ENCRYPTION_MS 200
+
+/* Error code when Qseecomd is not up at boot time */
+#define QSEECOMD_ERROR -4103
+
+/* Wait for 100ms on every retry to check if Qseecomd is up */
+#define SLEEP_QSEECOMD_WAIT_MS 100
+
+#define SLEEP_SET_HW_KEY_MS 300
+
+/* flags set by tz in response message */
+#define HDCP_TXMTR_SUBSTATE_INIT 0
+#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST 1
+#define HDCP_TXMTR_SUBSTATE_PROCESSED_RECIEVERID_LIST 2
+#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_STREAM_READY_MESSAGE 3
+#define HDCP_TXMTR_SUBSTATE_REPEATER_AUTH_COMPLETE 4
+
+enum hdcp_state {
+	HDCP_STATE_INIT = 0x00,
+	HDCP_STATE_APP_LOADED = 0x01,
+	HDCP_STATE_SESSION_INIT = 0x02,
+	HDCP_STATE_TXMTR_INIT = 0x04,
+	HDCP_STATE_AUTHENTICATED = 0x08,
+	HDCP_STATE_ERROR = 0x10
+};
+
+struct hdcp_ta_interface {
+	void *(*trusted_app_hdcp1_init)(void);
+	bool (*trusted_app_hdcp1_feature_supported)(void *data);
+	int (*trusted_app_hdcp1_set_enc)(void *data, bool enable);
+	int (*trusted_app_hdcp1_ops_notify)(void *data, void *topo,
+		bool is_authenticated);
+	int (*trusted_app_hdcp1_start)(void *data, u32 *aksv_msb,
+		  u32 *aksv_lsb);
+	void (*trusted_app_hdcp1_stop)(void *data);
+	void *(*trusted_app_hdcp2_init)(u32 device_type);
+	void (*trusted_app_hdcp2_deinit)(void *ctx);
+	int (*trusted_app_hdcp2_app_start)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_start_auth)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_process_msg)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_timeout)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_enable_encryption)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_query_stream)(void *ctx, uint32_t req_len);
+	int (*trusted_app_hdcp2_app_stop)(void *ctx);
+	bool (*trusted_app_hdcp2_feature_supported)(void *ctx);
+	int (*trusted_app_hdcp2_force_encryption)(void *ctx, uint32_t enable);
+	int (*trusted_app_hdcp2_open_stream)(void *ctx, uint8_t vc_payload_id,
+		 uint8_t stream_number, uint32_t *stream_id);
+	int (*trusted_app_hdcp2_close_stream)(void *ctx, uint32_t stream_id);
+	int (*trusted_app_hdcp2_update_app_data)(void *ctx,
+		 struct hdcp2_app_data *app_data);
+};
+
+int hdcp1_validate_aksv(u32 aksv_msb, u32 aksv_lsb);
+
+#endif /* __HDCP_MAIN_H__ */

+ 1456 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_qseecom.c

@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2022 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/errno.h>
+#include <linux/hdcp_qseecom.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <misc/qseecom_kernel.h>
+
+#include "hdcp_qseecom.h"
+#include "hdcp_main.h"
+
+#define HDCP_CMD_STATUS_TO_STR(x) #x
+
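+/*
+ * Map the shared qseecom buffer onto the request/response structs for
+ * command "x" and pre-fill the command id; bails out to the caller's
+ * error label when the qseecom handle is missing.
+ */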
+#define hdcp2_app_init_var(x) \
+	struct hdcp_##x##_req *req_buf = NULL; \
+	struct hdcp_##x##_rsp *rsp_buf = NULL; \
+	if (!handle || !handle->qseecom_handle) { \
+		pr_err("invalid qseecom_handle while processing %s\n", #x); \
+		rc = -EINVAL; \
+		goto error; \
+	} \
+	req_buf = (struct hdcp_##x##_req *)handle->qseecom_handle->sbuf; \
+	rsp_buf = (struct hdcp_##x##_rsp *)(handle->qseecom_handle->sbuf + \
+			   QSEECOM_ALIGN(sizeof(struct hdcp_##x##_req))); \
+	req_buf->commandid = hdcp_cmd_##x
+
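+/*
+ * Send the prepared request to the TA over qseecom and fold any non-zero
+ * TA status into -EINVAL.
+ */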
+#define hdcp2_app_process_cmd(x) \
+	({ \
+		int rc = qseecom_send_command( \
+			handle->qseecom_handle, req_buf, \
+			QSEECOM_ALIGN(sizeof(struct hdcp_##x##_req)), rsp_buf, \
+			QSEECOM_ALIGN(sizeof(struct hdcp_##x##_rsp))); \
+		if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS)) { \
+			pr_err("qseecom cmd %s failed with err = %d, status = %d:%s\n", \
+				   #x, rc, rsp_buf->status, \
+				   hdcp_cmd_status_to_str(rsp_buf->status)); \
+			rc = -EINVAL; \
+		} \
+		rc; \
+	})
+
+const char *hdcp_errors[] =	{"HDCP_SUCCESS",
+				"HDCP_FAIL",
+				"HDCP_BAD_PARAM",
+				"HDCP_DEVICE_TYPE_UNSUPPORTED",
+				"HDCP_INVALID_COMMAND",
+				"HDCP_INVALID_COMMAND_HANDLE",
+				"HDCP_ERROR_SIZE_IN",
+				"HDCP_ERROR_SIZE_OUT",
+				"HDCP_DATA_SIZE_INSUFFICIENT",
+				"HDCP_UNSUPPORTED_RX_VERSION",
+				"HDCP_WRONG_RX_CAPAB_MASK",
+				"HDCP_WRONG_RX_RSVD",
+				"HDCP_WRONG_RX_HDCP_CAPABLE",
+				"HDCP_RSA_SIGNATURE_VERIFY_FAILED",
+				"HDCP_VERIFY_H_PRIME_FAILED",
+				"HDCP_LC_FAILED",
+				"HDCP_MESSAGE_TIMEOUT",
+				"HDCP_COUNTER_ROLL_OVER",
+				"HDCP_WRONG_RXINFO_RSVD",
+				"HDCP_RXINFO_MAX_DEVS",
+				"HDCP_RXINFO_MAX_CASCADE",
+				"HDCP_WRONG_INITIAL_SEQ_NUM_V",
+				"HDCP_SEQ_NUM_V_ROLL_OVER",
+				"HDCP_WRONG_SEQ_NUM_V",
+				"HDCP_VERIFY_V_FAILED",
+				"HDCP_RPT_METHOD_INVOKED",
+				"HDCP_RPT_STRM_LEN_WRONG",
+				"HDCP_VERIFY_STRM_M_FAILED",
+				"HDCP_TRANSMITTER_NOT_FOUND",
+				"HDCP_SESSION_NOT_FOUND",
+				"HDCP_MAX_SESSION_EXCEEDED",
+				"HDCP_MAX_CONNECTION_EXCEEDED",
+				"HDCP_MAX_STREAMS_EXCEEDED",
+				"HDCP_MAX_DEVICES",
+				"HDCP_ALLOC_FAILED",
+				"HDCP_CONNECTION_NOT_FOUND",
+				"HDCP_HASH_FAILED",
+				"HDCP_BN_FAILED",
+				"HDCP_ENCRYPT_KM_FAILED",
+				"HDCP_DECRYPT_KM_FAILED",
+				"HDCP_HMAC_FAILED",
+				"HDCP_GET_RANDOM_FAILED",
+				"HDCP_INVALID_KEY_HEADER",
+				"HDCP_INVALID_KEY_LC_HASH",
+				"HDCP_INVALID_KEY_HASH",
+				"HDCP_KEY_WRITE_FAILED",
+				"HDCP_KEY_READ_FAILED",
+				"HDCP_KEY_DECRYPT_FAILED",
+				"HDCP_TEST_KEY_ON_SECURE_DEVICE",
+				"HDCP_KEY_VERSION_UNSUPPORTED",
+				"HDCP_RXID_NOT_FOUND",
+				"HDCP_STORAGE_INIT_FAILED",
+				"HDCP_STORAGE_FILE_OPEN_FAILED",
+				"HDCP_STORAGE_FILE_READ_FAILED",
+				"HDCP_STORAGE_FILE_WRITE_FAILED",
+				"HDCP_STORAGE_ID_UNSUPPORTED",
+				"HDCP_MUTUAL_EXCLUSIVE_DEVICE_PRESENT",
+				"HDCP_INVALID_STATE",
+				"HDCP_CONFIG_READ_FAILED",
+				"HDCP_OPEN_TZ_SERVICE_FAILED",
+				"HDCP_HW_CLOCK_OFF",
+				"HDCP_SET_HW_KEY_FAILED",
+				"HDCP_CLEAR_HW_KEY_FAILED",
+				"HDCP_GET_CONTENT_LEVEL_FAILED",
+				"HDCP_STREAMID_INUSE",
+				"HDCP_STREAM_NOT_FOUND",
+				"HDCP_FORCE_ENCRYPTION_FAILED",
+				"HDCP_STREAMNUMBER_INUSE"};
+
+#define HDCP_TXMTR_SERVICE_ID 0x0001000
+#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | (x))
+
+enum {
+	hdcp_cmd_tx_init = SERVICE_CREATE_CMD(1),
+	hdcp_cmd_tx_init_v1 = SERVICE_CREATE_CMD(1),
+	hdcp_cmd_tx_deinit = SERVICE_CREATE_CMD(2),
+	hdcp_cmd_rcvd_msg = SERVICE_CREATE_CMD(3),
+	hdcp_cmd_send_timeout = SERVICE_CREATE_CMD(4),
+	hdcp_cmd_set_hw_key = SERVICE_CREATE_CMD(5),
+	hdcp_cmd_query_stream_type = SERVICE_CREATE_CMD(6),
+	hdcp_cmd_init_v1 = SERVICE_CREATE_CMD(11),
+	hdcp_cmd_init = SERVICE_CREATE_CMD(11),
+	hdcp_cmd_deinit = SERVICE_CREATE_CMD(12),
+	hdcp_cmd_version = SERVICE_CREATE_CMD(14),
+	hdcp_cmd_verify_key = SERVICE_CREATE_CMD(15),
+	hdcp_cmd_session_init = SERVICE_CREATE_CMD(16),
+	hdcp_cmd_session_deinit = SERVICE_CREATE_CMD(17),
+	hdcp_cmd_start_auth = SERVICE_CREATE_CMD(18),
+	hdcp_cmd_session_open_stream = SERVICE_CREATE_CMD(20),
+	hdcp_cmd_session_close_stream = SERVICE_CREATE_CMD(21),
+	hdcp_cmd_force_encryption = SERVICE_CREATE_CMD(22),
+};
+
+static struct qseecom_handle *qseecom_handle_g;
+static struct qseecom_handle *hdcpsrm_qseecom_handle_g;
+static int hdcp2_app_started;
+
+static struct qseecom_handle *hdcp1_qseecom_handle_g;
+static int hdcp1_app_started;
+
+static const char *hdcp_cmd_status_to_str(uint32_t err)
+{
+	uint32_t len = ARRAY_SIZE(hdcp_errors);
+
+	if (err < len)
+		return hdcp_errors[err];
+	else
+		return "";
+}
+
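+/*
+ * Start the hdcp1 TA (one shared, refcounted qseecom handle) plus the
+ * optional HDCP 1.x ops TA for this handle.
+ */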
+static int hdcp1_app_load(struct hdcp1_qsee_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		goto error;
+	}
+
+	if (!hdcp1_qseecom_handle_g) {
+		rc = qseecom_start_app(&hdcp1_qseecom_handle_g, handle->app_name,
+				QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("%s app load failed (%d)\n", handle->app_name, rc);
+			goto error;
+		}
+	}
+	handle->qseecom_handle = hdcp1_qseecom_handle_g;
+	hdcp1_app_started++;
+
+	rc = qseecom_start_app(&handle->hdcpops_handle, HDCP1OPS_APP_NAME,
+			QSEECOM_SBUFF_SIZE);
+	if (rc) {
+		pr_warn("%s app load failed (%d)\n", HDCP1OPS_APP_NAME, rc);
+		handle->hdcpops_handle = NULL;
+	}
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+	pr_debug("%s app loaded\n", handle->app_name);
+
+error:
+	return rc;
+}
+
+static void hdcp1_app_unload(struct hdcp1_qsee_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle || !handle->qseecom_handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_warn("%s app not loaded\n", handle->app_name);
+		return;
+	}
+
+	if (handle->hdcpops_handle) {
+		/* deallocate the resources for HDCP 1.x ops handle */
+		rc = qseecom_shutdown_app(&handle->hdcpops_handle);
+		if (rc)
+			pr_warn("%s app unload failed (%d)\n", HDCP1OPS_APP_NAME, rc);
+	}
+
+	hdcp1_app_started--;
+	if (!hdcp1_app_started) {
+	/* deallocate the resources for qseecom HDCP 1.x handle */
+		rc = qseecom_shutdown_app(&hdcp1_qseecom_handle_g);
+		if (rc) {
+			pr_err("%s app unload failed (%d)\n", handle->app_name, rc);
+			return;
+		}
+		hdcp1_qseecom_handle_g = NULL;
+	}
+	handle->qseecom_handle = NULL;
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+	pr_debug("%s app unloaded\n", handle->app_name);
+}
+
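+/*
+ * Program the HDCP 1.x keys in the TA, read back the AKSV and validate
+ * that it carries the required 20 set bits.
+ */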
+static int hdcp1_set_key(struct hdcp1_qsee_handle *hdcp1_handle, u32 *aksv_msb,
+				 u32 *aksv_lsb)
+{
+	int rc = 0;
+	struct hdcp1_key_set_req *key_set_req;
+	struct hdcp1_key_set_rsp *key_set_rsp;
+	struct qseecom_handle *handle = NULL;
+
+	if (aksv_msb == NULL || aksv_lsb == NULL) {
+		pr_err("invalid aksv\n");
+		return -EINVAL;
+	}
+
+	if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", hdcp1_handle->app_name);
+		return -EINVAL;
+	}
+
+	handle = hdcp1_handle->qseecom_handle;
+
+	/* set keys and request aksv */
+	key_set_req = (struct hdcp1_key_set_req *)handle->sbuf;
+	key_set_req->commandid = HDCP1_SET_KEY;
+	key_set_rsp = (struct hdcp1_key_set_rsp *)(handle->sbuf +
+			QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req)));
+	rc = qseecom_send_command(
+		handle, key_set_req, QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req)),
+		key_set_rsp, QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err=%d\n", rc);
+		return -ENOKEY;
+	}
+
+	rc = key_set_rsp->ret;
+	if (rc) {
+		pr_err("set key cmd failed, rsp=%d\n", key_set_rsp->ret);
+		return -ENOKEY;
+	}
+
+	/* copy bytes into msb and lsb */
+	*aksv_msb = key_set_rsp->ksv[0] << 24 | key_set_rsp->ksv[1] << 16 |
+				key_set_rsp->ksv[2] << 8 | key_set_rsp->ksv[3];
+	*aksv_lsb = key_set_rsp->ksv[4] << 24 | key_set_rsp->ksv[5] << 16 |
+				key_set_rsp->ksv[6] << 8 | key_set_rsp->ksv[7];
+
+	rc = hdcp1_validate_aksv(*aksv_msb, *aksv_lsb);
+	if (rc) {
+		pr_err("aksv validation failed (%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int hdcp1_verify_key(struct hdcp1_qsee_handle *hdcp1_handle)
+{
+	int rc = 0;
+	struct hdcp1_key_verify_req *key_verify_req;
+	struct hdcp1_key_verify_rsp *key_verify_rsp;
+	struct qseecom_handle *handle = NULL;
+
+	if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", hdcp1_handle->app_name);
+		return -EINVAL;
+	}
+
+	handle = hdcp1_handle->qseecom_handle;
+
+	key_verify_req = (struct hdcp1_key_verify_req *)handle->sbuf;
+	key_verify_req->commandid = HDCP1_KEY_VERIFY;
+	key_verify_rsp =
+		(struct hdcp1_key_verify_rsp *)(handle->sbuf +
+		 QSEECOM_ALIGN(sizeof(struct hdcp1_key_verify_req)));
+	rc = qseecom_send_command(
+		handle, key_verify_req,
+		QSEECOM_ALIGN(sizeof(struct hdcp1_key_verify_req)), key_verify_rsp,
+		QSEECOM_ALIGN(sizeof(struct hdcp1_key_verify_rsp)));
+
+	if (rc < 0) {
+		pr_err("command HDCP1_KEY_VERIFY failed (%d)\n", rc);
+		return -EINVAL;
+	}
+
+	rc = key_verify_rsp->ret;
+
+	if (rc == QSEECOMD_ERROR)
+		qseecomd_down = true;
+	else
+		qseecomd_down = false;
+
+	if (rc) {
+		pr_err("key_verify failed, rsp=%d\n", key_verify_rsp->ret);
+		return -EINVAL;
+	}
+
+	pr_debug("success\n");
+
+	return 0;
+}
+
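+/*
+ * Drop one reference on the HDCP2P2/HDCPSRM TAs and shut both down once
+ * the last user has gone away.
+ */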
+static int hdcp2_app_unload(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(deinit);
+
+	hdcp2_app_started--;
+	if (!hdcp2_app_started) {
+
+		hdcp2_app_process_cmd(deinit);
+		/* deallocate the resources for qseecom HDCPSRM handle */
+		rc = qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle);
+		if (rc)
+			pr_err("qseecom_shutdown_app failed for HDCPSRM (%d)\n", rc);
+
+		hdcpsrm_qseecom_handle_g = NULL;
+		/* deallocate the resources for qseecom HDCP2P2 handle */
+		rc = qseecom_shutdown_app(&handle->qseecom_handle);
+		if (rc) {
+			pr_err("qseecom_shutdown_app failed for HDCP2P2 (%d)\n", rc);
+			return rc;
+		}
+		qseecom_handle_g = NULL;
+	}
+	handle->qseecom_handle = NULL;
+	handle->hdcpsrm_qseecom_handle = NULL;
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+	pr_debug("%s app unloaded\n", handle->app_name);
+
+	return rc;
+error:
+	if (handle && !hdcp2_app_started)
+		qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle);
+	return rc;
+}
+
+static int hdcp2_verify_key(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(verify_key);
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", handle->app_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = hdcp2_app_process_cmd(verify_key);
+	pr_debug("verify_key = %d\n", rc);
+
+	if (rsp_buf->status == QSEECOMD_ERROR)
+		qseecomd_down = true;
+	else
+		qseecomd_down = false;
+
+error:
+	return rc;
+}
+
+static int hdcp2_app_tx_deinit(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(tx_deinit);
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", handle->app_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rc = hdcp2_app_process_cmd(tx_deinit);
+	if (rc)
+		goto error;
+
+	handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT;
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+static int hdcp2_app_session_deinit(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(session_deinit);
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", handle->app_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->sessionid = handle->session_id;
+
+	rc = hdcp2_app_process_cmd(session_deinit);
+	if (rc)
+		goto error;
+
+	handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT;
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+void *hdcp1_init_qseecom(void)
+{
+	struct hdcp1_qsee_handle *handle =
+		kzalloc(sizeof(struct hdcp1_qsee_handle), GFP_KERNEL);
+
+	if (!handle)
+		goto error;
+
+	handle->app_name = HDCP1_APP_NAME;
+
+error:
+	return handle;
+}
+
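+/*
+ * Probe for HDCP 1.x support by loading the TA and verifying the key,
+ * retrying while qseecomd is still coming up.
+ */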
+bool hdcp1_feature_supported_qseecom(void *data)
+{
+	bool supported = false;
+	struct hdcp1_qsee_handle *handle = data;
+	int rc = 0;
+	int retry = 0;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		goto error;
+	}
+
+	if (handle->feature_supported) {
+		supported = true;
+		goto error;
+	}
+
+	rc = hdcp1_app_load(handle);
+
+	/*
+	 * hdcp1_app_load() also returns zero when it fails early because the
+	 * handle is NULL, so check hdcp_state as well to make sure this branch
+	 * only runs after a genuinely successful load.
+	 */
+	if (!rc && (handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		do {
+			if (!hdcp1_verify_key(handle)) {
+				pr_debug("HDCP 1.x supported\n");
+				pr_debug("hdcp1_verify_key succeeded on %d retry.\n", retry);
+				handle->feature_supported = true;
+				supported = true;
+				break;
+			} else if (qseecomd_down) {
+				pr_debug("Qseecomd is not up. Going to sleep.\n");
+				msleep(SLEEP_QSEECOMD_WAIT_MS);
+				retry++;
+			} else {
+				break;
+			}
+		} while (handle->max_hdcp_key_verify_retries >= retry);
+
+		if (qseecomd_down) {
+			pr_err("hdcp1_verify_key failed after %d retries as Qseecomd is not up.\n",
+				handle->max_hdcp_key_verify_retries);
+		}
+		hdcp1_app_unload(handle);
+	}
+error:
+	return supported;
+}
+
+int hdcp1_set_enc_qseecom(void *data, bool enable)
+{
+	int rc = 0;
+	struct hdcp1_set_enc_req *set_enc_req;
+	struct hdcp1_set_enc_rsp *set_enc_rsp;
+	struct hdcp1_qsee_handle *hdcp1_handle = data;
+	struct qseecom_handle *handle = NULL;
+
+	if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!hdcp1_handle->feature_supported) {
+		pr_err("HDCP 1.x not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", hdcp1_handle->app_name);
+		return -EINVAL;
+	}
+
+	handle = hdcp1_handle->qseecom_handle;
+
+	/* set keys and request aksv */
+	set_enc_req = (struct hdcp1_set_enc_req *)handle->sbuf;
+	set_enc_req->commandid = HDCP1_SET_ENC;
+	set_enc_req->enable = enable;
+	set_enc_rsp = (struct hdcp1_set_enc_rsp *)(handle->sbuf +
+		   QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req)));
+	rc = qseecom_send_command(
+		handle, set_enc_req, QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req)),
+		set_enc_rsp, QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_rsp)));
+
+	if (rc < 0) {
+		pr_err("qseecom cmd failed err=%d\n", rc);
+		return -EINVAL;
+	}
+
+	rc = set_enc_rsp->ret;
+	if (rc) {
+		pr_err("enc cmd failed, rsp=%d\n", set_enc_rsp->ret);
+		return -EINVAL;
+	}
+
+	pr_debug("success\n");
+	return 0;
+}
+
+int hdcp1_ops_notify_qseecom(void *data, void *topo, bool is_authenticated)
+{
+	int rc = 0;
+	struct hdcp1_ops_notify_req *ops_notify_req;
+	struct hdcp1_ops_notify_rsp *ops_notify_rsp;
+	struct hdcp1_qsee_handle *hdcp1_handle = data;
+	struct qseecom_handle *handle = NULL;
+	struct hdcp1_topology *topology = NULL;
+
+	if (!hdcp1_handle || !hdcp1_handle->hdcpops_handle) {
+		pr_err("invalid HDCP 1.x ops handle\n");
+		return -EINVAL;
+	}
+
+	if (!hdcp1_handle->feature_supported) {
+		pr_err("HDCP 1.x not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", HDCP1OPS_APP_NAME);
+		return -EINVAL;
+	}
+
+	handle = hdcp1_handle->hdcpops_handle;
+	topology = (struct hdcp1_topology *)topo;
+
+	/* set keys and request aksv */
+	ops_notify_req = (struct hdcp1_ops_notify_req *)handle->sbuf;
+	ops_notify_req->commandid = HDCP1_NOTIFY_TOPOLOGY;
+	ops_notify_req->device_type = DEVICE_TYPE_DP;
+	ops_notify_req->is_authenticated = is_authenticated;
+	ops_notify_req->topology.depth = topology->depth;
+	ops_notify_req->topology.device_count = topology->device_count;
+	ops_notify_req->topology.max_devices_exceeded =
+		topology->max_devices_exceeded;
+	ops_notify_req->topology.max_cascade_exceeded =
+		topology->max_cascade_exceeded;
+
+	/*
+	 * For hdcp1.4 below two nodes are not applicable but as
+	 * TZ ops ta talks with other drivers with same structure
+	 * and want to maintain same interface across hdcp versions,
+	 * we are setting the values to 0.
+	 */
+	ops_notify_req->topology.hdcp2LegacyDeviceDownstream = 0;
+	ops_notify_req->topology.hdcp1DeviceDownstream = 0;
+
+	memset(ops_notify_req->recv_id_list, 0,
+		   sizeof(uint8_t) * MAX_REC_ID_LIST_SIZE);
+
+	ops_notify_rsp =
+		(struct hdcp1_ops_notify_rsp *)(handle->sbuf +
+		  QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_req)));
+	rc = qseecom_send_command(
+		handle, ops_notify_req,
+		QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_req)), ops_notify_rsp,
+		QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_rsp)));
+
+	rc = ops_notify_rsp->ret;
+	if (rc < 0) {
+		pr_warn("Ops notify cmd failed, rsp=%d\n", ops_notify_rsp->ret);
+		return -EINVAL;
+	}
+
+	pr_debug("ops notify success\n");
+	return 0;
+}
+
+int hdcp1_start_qseecom(void *data, u32 *aksv_msb, u32 *aksv_lsb)
+{
+	int rc = 0;
+	struct hdcp1_qsee_handle *handle = data;
+
+	if (!aksv_msb || !aksv_lsb) {
+		pr_err("invalid aksv output buffer\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!handle->feature_supported) {
+		pr_err("feature not supported\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_debug("%s app already loaded\n", handle->app_name);
+		goto error;
+	}
+
+	rc = hdcp1_app_load(handle);
+	if (rc)
+		goto error;
+
+	rc = hdcp1_set_key(handle, aksv_msb, aksv_lsb);
+	if (rc)
+		goto key_error;
+
+	pr_debug("success\n");
+	return rc;
+
+key_error:
+	hdcp1_app_unload(handle);
+error:
+	return rc;
+}
+
+void hdcp1_stop_qseecom(void *data)
+{
+	struct hdcp1_qsee_handle *hdcp1_handle = data;
+
+	if (!hdcp1_handle || !hdcp1_handle->qseecom_handle ||
+		!hdcp1_handle->hdcpops_handle) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_debug("%s app not loaded\n", hdcp1_handle->app_name);
+		return;
+	}
+
+	hdcp1_app_unload(hdcp1_handle);
+}
+
+static int hdcp2_app_init_legacy(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(init_v1);
+
+	if (!handle->legacy_app) {
+		pr_err("wrong init function\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("library already loaded\n");
+		goto error;
+	}
+
+	rc = hdcp2_app_process_cmd(init_v1);
+	if (rc)
+		goto error;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+static int hdcp2_app_tx_init_legacy(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(tx_init_v1);
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("app not loaded\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) {
+		pr_err("txmtr already initialized\n");
+		goto error;
+	}
+
+	rc = hdcp2_app_process_cmd(tx_init_v1);
+	if (rc)
+		goto error;
+
+	handle->app_data.response.data = rsp_buf->message;
+	handle->app_data.response.length = rsp_buf->msglen;
+	handle->app_data.timeout = rsp_buf->timeout;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+static int hdcp2_app_init(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+	uint32_t app_minor_version = 0;
+
+	hdcp2_app_init_var(init);
+
+	if (handle->legacy_app) {
+		pr_err("wrong init function\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("library already loaded\n");
+		goto error;
+	}
+
+	req_buf->clientversion = HDCP_CLIENT_MAKE_VERSION(
+		HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION,
+		HDCP_CLIENT_PATCH_VERSION);
+
+	rc = hdcp2_app_process_cmd(init);
+	if (rc)
+		goto error;
+
+	app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion);
+	if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) {
+		pr_err("client-app minor version mismatch app(%d), client(%d)\n",
+			   app_minor_version, HDCP_CLIENT_MINOR_VERSION);
+		rc = -1;
+		goto error;
+	}
+
+	pr_debug("success\n");
+
+	pr_debug("client version major(%d), minor(%d), patch(%d)\n",
+			 HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION,
+			 HDCP_CLIENT_PATCH_VERSION);
+
+	pr_debug("app version major(%d), minor(%d), patch(%d)\n",
+			 HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion),
+			 HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion),
+			 HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion));
+error:
+	return rc;
+}
+
+static int hdcp2_app_tx_init(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(tx_init);
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) {
+		pr_err("txmtr already initialized\n");
+		goto error;
+	}
+
+	req_buf->sessionid = handle->session_id;
+
+	rc = hdcp2_app_process_cmd(tx_init);
+	if (rc)
+		goto error;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
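+/* Query the TA version to pick between the legacy and current init paths. */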
+static int hdcp_get_version(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+	uint32_t app_major_version = 0;
+
+	hdcp2_app_init_var(version);
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("library already loaded\n");
+		goto error;
+	}
+
+	rc = hdcp2_app_process_cmd(version);
+	if (rc)
+		goto error;
+
+	app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion);
+
+	pr_debug("hdcp2p2 app major version %d, app version %d\n", app_major_version,
+			 rsp_buf->appversion);
+
+	if (app_major_version == 1)
+		handle->legacy_app = true;
+
+error:
+	return rc;
+}
+
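+/*
+ * Start the HDCP2P2 and HDCPSRM TAs (shared, refcounted handles), detect
+ * the TA version and run the matching library init for the first user.
+ */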
+static int hdcp2_app_load(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("%s app already loaded\n", handle->app_name);
+		goto error;
+	}
+
+	if (!qseecom_handle_g) {
+		rc = qseecom_start_app(&qseecom_handle_g,
+			 handle->app_name, QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("qseecom_start_app failed for HDCP2P2 (%d)\n", rc);
+			goto error;
+		}
+	}
+
+	handle->qseecom_handle = qseecom_handle_g;
+
+	if (!hdcpsrm_qseecom_handle_g) {
+		rc = qseecom_start_app(&hdcpsrm_qseecom_handle_g,
+			 HDCPSRM_APP_NAME, QSEECOM_SBUFF_SIZE);
+		if (rc) {
+			pr_err("qseecom_start_app failed for HDCPSRM (%d)\n", rc);
+			goto hdcpsrm_error;
+		}
+	}
+
+	handle->hdcpsrm_qseecom_handle = hdcpsrm_qseecom_handle_g;
+	pr_debug("qseecom_start_app success\n");
+
+	rc = hdcp_get_version(handle);
+	if (rc) {
+		pr_err("library get version failed\n");
+		goto get_version_error;
+	}
+
+	if (handle->legacy_app) {
+		handle->app_init = hdcp2_app_init_legacy;
+		handle->tx_init = hdcp2_app_tx_init_legacy;
+	} else {
+		handle->app_init = hdcp2_app_init;
+		handle->tx_init = hdcp2_app_tx_init;
+	}
+
+	if (!hdcp2_app_started) {
+		rc = handle->app_init(handle);
+		if (rc) {
+			pr_err("app init failed\n");
+			goto get_version_error;
+		}
+	}
+
+	hdcp2_app_started++;
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+	return rc;
+get_version_error:
+	if (!hdcp2_app_started) {
+		qseecom_shutdown_app(&hdcpsrm_qseecom_handle_g);
+		hdcpsrm_qseecom_handle_g = NULL;
+	}
+	handle->hdcpsrm_qseecom_handle = NULL;
+hdcpsrm_error:
+	if (!hdcp2_app_started) {
+		qseecom_shutdown_app(&qseecom_handle_g);
+		qseecom_handle_g = NULL;
+	}
+	handle->qseecom_handle = NULL;
+error:
+	return rc;
+}
+
+static int hdcp2_app_session_init(struct hdcp2_qsee_handle *handle)
+{
+	int rc = 0;
+
+	hdcp2_app_init_var(session_init);
+
+	if (!handle->qseecom_handle || !handle->qseecom_handle->sbuf) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", handle->app_name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) {
+		pr_err("session already initialized\n");
+		goto error;
+	}
+
+	req_buf->deviceid = handle->device_type;
+
+	rc = hdcp2_app_process_cmd(session_init);
+	if (rc)
+		goto error;
+
+	pr_debug("session id %d\n", rsp_buf->sessionid);
+
+	handle->session_id = rsp_buf->sessionid;
+	handle->hdcp_state |= HDCP_STATE_SESSION_INIT;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+void *hdcp2_init_qseecom(u32 device_type)
+{
+	struct hdcp2_qsee_handle *handle =
+		kzalloc(sizeof(struct hdcp2_qsee_handle), GFP_KERNEL);
+
+	if (!handle)
+		goto error;
+
+	handle->device_type = device_type;
+	handle->app_name = HDCP2P2_APP_NAME;
+
+	handle->res_buf = kmalloc(QSEECOM_SBUFF_SIZE, GFP_KERNEL);
+	if (!handle->res_buf) {
+		kfree_sensitive(handle);
+		return NULL;
+	}
+
+	handle->req_buf = kmalloc(QSEECOM_SBUFF_SIZE, GFP_KERNEL);
+	if (!handle->req_buf) {
+		kfree_sensitive(handle->res_buf);
+		kfree_sensitive(handle);
+		return NULL;
+	}
+
+	handle->app_data.request.data =  handle->req_buf;
+	handle->app_data.response.data = handle->res_buf;
+error:
+	return handle;
+}
+
+void hdcp2_deinit_qseecom(void *ctx)
+{
+	struct hdcp2_qsee_handle *handle = NULL;
+	int rc = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	kfree_sensitive(handle->res_buf);
+	kfree_sensitive(handle->req_buf);
+
+error:
+	kfree_sensitive(ctx);
+}
+
+int hdcp2_app_start_qseecom(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_qsee_handle *handle = NULL;
+	int rc = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	rc = hdcp2_app_load(handle);
+	if (rc)
+		goto error;
+
+	if (!handle->legacy_app) {
+		rc = hdcp2_app_session_init(handle);
+		if (rc)
+			goto error;
+	}
+
+	if (handle->tx_init == NULL) {
+		pr_err("invalid txmtr init function pointer\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = handle->tx_init(handle);
+
+error:
+	return rc;
+}
+
+int hdcp2_app_start_auth_qseecom(void *ctx, uint32_t req_len)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(start_auth);
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->ctxHandle = handle->tz_ctxhandle;
+
+	rc = hdcp2_app_process_cmd(start_auth);
+	if (rc)
+		goto error;
+
+	memcpy(handle->res_buf, rsp_buf->message, rsp_buf->msglen);
+
+	handle->app_data.response.length = rsp_buf->msglen;
+	handle->app_data.timeout = rsp_buf->timeout;
+	handle->app_data.repeater_flag = false;
+
+	handle->tz_ctxhandle = rsp_buf->ctxhandle;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
+int hdcp2_app_process_msg_qseecom(void *ctx, uint32_t req_len)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(rcvd_msg);
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	if (!handle->app_data.request.data) {
+		pr_err("invalid request buffer\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->msglen = handle->app_data.request.length;
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	memcpy(req_buf->msg, handle->req_buf, handle->app_data.request.length);
+
+	rc = hdcp2_app_process_cmd(rcvd_msg);
+	if (rc)
+		goto error;
+
+	/* check if it's a repeater */
+	if (rsp_buf->flag == HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST)
+		handle->app_data.repeater_flag = true;
+
+	memcpy(handle->res_buf, rsp_buf->msg, rsp_buf->msglen);
+
+	handle->app_data.response.length = rsp_buf->msglen;
+	handle->app_data.timeout = rsp_buf->timeout;
+
+error:
+	return rc;
+}
+
+int hdcp2_app_timeout_qseecom(void *ctx, uint32_t req_len)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(send_timeout);
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	rc = hdcp2_app_process_cmd(send_timeout);
+	if (rc)
+		goto error;
+
+	memcpy(handle->res_buf, rsp_buf->message, rsp_buf->msglen);
+
+	handle->app_data.response.length = rsp_buf->msglen;
+	handle->app_data.timeout = rsp_buf->timeout;
+error:
+	return rc;
+}
+
+int hdcp2_app_enable_encryption_qseecom(void *ctx, uint32_t req_len)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(set_hw_key);
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	/*
+	 * wait at least 200ms before enabling encryption
+	 * as per hdcp2p2 specifications.
+	 */
+	msleep(SLEEP_SET_HW_KEY_MS);
+
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rc = hdcp2_app_process_cmd(set_hw_key);
+	if (rc)
+		goto error;
+
+	handle->hdcp_state |= HDCP_STATE_AUTHENTICATED;
+error:
+	return rc;
+}
+
+int hdcp2_app_query_stream_qseecom(void *ctx, uint32_t req_len)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(query_stream_type);
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+
+	rc = hdcp2_app_process_cmd(query_stream_type);
+	if (rc)
+		goto error;
+
+	memcpy(handle->res_buf, rsp_buf->msg, rsp_buf->msglen);
+
+	handle->app_data.response.length = rsp_buf->msglen;
+	handle->app_data.timeout = rsp_buf->timeout;
+error:
+	return rc;
+}
+
+int hdcp2_app_stop_qseecom(void *ctx)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = hdcp2_app_tx_deinit(handle);
+	if (rc)
+		goto error;
+
+	if (!handle->legacy_app) {
+		rc = hdcp2_app_session_deinit(handle);
+		if (rc)
+			goto error;
+	}
+
+	rc = hdcp2_app_unload(handle);
+error:
+	return rc;
+}
+
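+/*
+ * Probe for HDCP 2.x support by loading the TA and verifying the key,
+ * retrying while qseecomd is still coming up.
+ */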
+bool hdcp2_feature_supported_qseecom(void *ctx)
+{
+	int rc = 0;
+	int retry = 0;
+	bool supported = false;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->feature_supported) {
+		supported = true;
+		goto error;
+	}
+
+	rc = hdcp2_app_load(handle);
+	if (!rc) {
+		do {
+			if (!hdcp2_verify_key(handle)) {
+				pr_debug("HDCP 2.2 supported.\n");
+				pr_debug("hdcp2_verify_key succeeded on %d retry.\n", retry);
+				handle->feature_supported = true;
+				supported = true;
+				break;
+			} else if (qseecomd_down) {
+				pr_debug("Qseecomd is not up. Going to sleep.\n");
+				msleep(SLEEP_QSEECOMD_WAIT_MS);
+				retry++;
+			} else {
+				break;
+			}
+		} while (handle->max_hdcp_key_verify_retries >= retry);
+
+		if (qseecomd_down) {
+			pr_err("hdcp2_verify_key failed after %d retries as Qseecomd is not up.\n",
+				handle->max_hdcp_key_verify_retries);
+		}
+		hdcp2_app_unload(handle);
+	}
+error:
+	return supported;
+}
+
+int hdcp2_force_encryption_qseecom(void *ctx, uint32_t enable)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	hdcp2_app_init_var(force_encryption);
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_AUTHENTICATED)
+		msleep(SLEEP_FORCE_ENCRYPTION_MS);
+
+	req_buf->ctxhandle = handle->tz_ctxhandle;
+	req_buf->enable = enable;
+
+	rc = hdcp2_app_process_cmd(force_encryption);
+	if (rc || (rsp_buf->commandid != hdcp_cmd_force_encryption))
+		goto error;
+
+error:
+	return rc;
+}
+
+int hdcp2_open_stream_qseecom(void *ctx, uint8_t vc_payload_id,
+		uint8_t stream_number, uint32_t *stream_id)
+{
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+	int rc = 0;
+
+	hdcp2_app_init_var(session_open_stream);
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->sessionid = handle->session_id;
+	req_buf->vcpayloadid = vc_payload_id;
+	req_buf->stream_number = stream_number;
+	req_buf->streamMediaType = 0;
+
+	rc = hdcp2_app_process_cmd(session_open_stream);
+	if (rc)
+		goto error;
+
+	*stream_id = rsp_buf->streamid;
+
+	pr_debug("success\n");
+
+error:
+	return rc;
+}
+
+int hdcp2_close_stream_qseecom(void *ctx, uint32_t stream_id)
+{
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+	int rc = 0;
+
+	hdcp2_app_init_var(session_close_stream);
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	req_buf->sessionid = handle->session_id;
+	req_buf->streamid = stream_id;
+
+	rc = hdcp2_app_process_cmd(session_close_stream);
+	if (rc)
+		goto error;
+
+	pr_debug("success\n");
+error:
+	return rc;
+}
+
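+/* Copy the cached TA request/response state back into the caller's app_data. */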
+int hdcp2_update_app_data_qseecom(void *ctx, struct hdcp2_app_data *app_data)
+{
+	int rc = 0;
+	struct hdcp2_qsee_handle *handle = (struct hdcp2_qsee_handle *)ctx;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	app_data->request.data = handle->app_data.request.data;
+	app_data->request.length = handle->app_data.request.length;
+	app_data->response.data = handle->app_data.response.data;
+	app_data->response.length = handle->app_data.response.length;
+	app_data->timeout = handle->app_data.timeout;
+	app_data->repeater_flag = handle->app_data.repeater_flag;
+
+error:
+	return rc;
+}

+ 346 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_qseecom.h

@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HDCP_QSEECOM_H__
+#define __HDCP_QSEECOM_H__
+
+#include <linux/hdcp_qseecom.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "hdcp_main.h"
+
+/*
+ * @max_hdcp_key_verify_retries - Maximum number of key-verify retries; defaults
+ *                                to 0 (no retries, i.e. no wait). The actual
+ *                                value is taken from the dtsi file.
+ */
+struct hdcp1_qsee_handle {
+	struct qseecom_handle *qseecom_handle;
+	struct qseecom_handle *hdcpops_handle;
+	bool feature_supported;
+	uint32_t device_type;
+	enum hdcp_state hdcp_state;
+	char *app_name;
+	uint32_t max_hdcp_key_verify_retries;
+};
+
+/*
+ * If Qseecomd starts late and hdcp key
+ * verification has already started, qseecomd_down
+ * flag will be set to true. It will be set to false
+ * once the Qseecomd is up. Initial assumption is
+ * that the Qseecomd will start in time.
+ */
+static bool qseecomd_down;
+/*
+ * @max_hdcp_key_verify_retries - Maximum number of key-verify retries; defaults
+ *                                to 0 (no retries, i.e. no wait). The actual
+ *                                value is taken from the dtsi file.
+ */
+struct hdcp2_qsee_handle {
+	struct hdcp2_app_data app_data;
+	uint32_t tz_ctxhandle;
+	bool feature_supported;
+	enum hdcp_state hdcp_state;
+	struct qseecom_handle *qseecom_handle;
+	struct qseecom_handle *hdcpsrm_qseecom_handle;
+	uint32_t session_id;
+	bool legacy_app;
+	uint32_t device_type;
+	char *app_name;
+	unsigned char *req_buf;
+	unsigned char *res_buf;
+	int (*app_init)(struct hdcp2_qsee_handle *handle);
+	int (*tx_init)(struct hdcp2_qsee_handle *handle);
+	uint32_t max_hdcp_key_verify_retries;
+};
+
+struct hdcp1_key_set_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp1_key_set_rsp {
+	uint32_t commandid;
+	uint32_t ret;
+	uint8_t ksv[HDCP1_AKSV_SIZE];
+} __packed;
+
+struct hdcp1_ops_notify_req {
+	uint32_t commandid;
+	uint32_t device_type;
+	uint8_t recv_id_list[MAX_REC_ID_LIST_SIZE];
+	int32_t recv_id_len;
+	struct hdcp1_topology topology;
+	bool is_authenticated;
+} __packed;
+
+struct hdcp1_ops_notify_rsp {
+	uint32_t commandid;
+	uint32_t ret;
+} __packed;
+
+struct hdcp1_set_enc_req {
+	uint32_t commandid;
+	uint32_t enable;
+} __packed;
+
+struct hdcp1_set_enc_rsp {
+	uint32_t commandid;
+	uint32_t ret;
+} __packed;
+
+struct hdcp1_key_verify_req {
+	uint32_t commandid;
+	uint32_t key_type;
+} __packed;
+
+struct hdcp1_key_verify_rsp {
+	uint32_t commandId;
+	uint32_t ret;
+} __packed;
+
+struct hdcp_init_v1_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_init_v1_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_init_req {
+	uint32_t commandid;
+	uint32_t clientversion;
+} __packed;
+
+struct hdcp_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t appversion;
+} __packed;
+
+struct hdcp_session_init_req {
+	uint32_t commandid;
+	uint32_t deviceid;
+} __packed;
+
+struct hdcp_session_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t sessionid;
+} __packed;
+
+struct hdcp_tx_init_v1_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_tx_init_v1_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_tx_init_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+} __packed;
+
+struct hdcp_tx_init_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+} __packed;
+
+struct hdcp_version_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_version_rsp {
+	uint32_t status;
+	uint32_t commandId;
+	uint32_t appversion;
+} __packed;
+
+struct hdcp_session_open_stream_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+	uint32_t vcpayloadid;
+	uint32_t stream_number;
+	uint32_t streamMediaType;
+} __packed;
+
+struct hdcp_session_open_stream_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t streamid;
+} __packed;
+
+struct hdcp_session_close_stream_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+	uint32_t streamid;
+} __packed;
+
+struct hdcp_session_close_stream_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_force_encryption_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t enable;
+} __packed;
+
+struct hdcp_force_encryption_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_tx_deinit_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+} __packed;
+
+struct hdcp_tx_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_session_deinit_req {
+	uint32_t commandid;
+	uint32_t sessionid;
+} __packed;
+
+struct hdcp_session_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_deinit_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_deinit_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_query_stream_type_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+} __packed;
+
+struct hdcp_query_stream_type_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t msg[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_set_hw_key_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+} __packed;
+
+struct hdcp_set_hw_key_rsp {
+	uint32_t status;
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_send_timeout_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+} __packed;
+
+struct hdcp_send_timeout_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_start_auth_req {
+	uint32_t commandid;
+	uint32_t ctxHandle;
+} __packed;
+
+struct hdcp_start_auth_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t timeout;
+	uint32_t msglen;
+	uint8_t message[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_rcvd_msg_req {
+	uint32_t commandid;
+	uint32_t ctxhandle;
+	uint32_t msglen;
+	uint8_t msg[MAX_RX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_rcvd_msg_rsp {
+	uint32_t status;
+	uint32_t commandid;
+	uint32_t state;
+	uint32_t timeout;
+	uint32_t flag;
+	uint32_t msglen;
+	uint8_t msg[MAX_TX_MESSAGE_SIZE];
+} __packed;
+
+struct hdcp_verify_key_req {
+	uint32_t commandid;
+} __packed;
+
+struct hdcp_verify_key_rsp {
+	uint32_t status;
+	uint32_t commandId;
+} __packed;
+
+#define HDCP1_SET_KEY 202
+#define HDCP1_KEY_VERIFY 204
+#define HDCP1_SET_ENC 205
+
+/* DP device type */
+#define DEVICE_TYPE_DP 0x8002
+
+void *hdcp1_init_qseecom(void);
+bool hdcp1_feature_supported_qseecom(void *data);
+int hdcp1_set_enc_qseecom(void *data, bool enable);
+int hdcp1_ops_notify_qseecom(void *data, void *topo, bool is_authenticated);
+int hdcp1_start_qseecom(void *data, u32 *aksv_msb, u32 *aksv_lsb);
+void hdcp1_stop_qseecom(void *data);
+
+void *hdcp2_init_qseecom(u32 device_type);
+void hdcp2_deinit_qseecom(void *ctx);
+int hdcp2_app_start_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_start_auth_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_process_msg_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_timeout_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_enable_encryption_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_query_stream_qseecom(void *ctx, uint32_t req_len);
+int hdcp2_app_stop_qseecom(void *ctx);
+bool hdcp2_feature_supported_qseecom(void *ctx);
+int hdcp2_force_encryption_qseecom(void *ctx, uint32_t enable);
+int hdcp2_open_stream_qseecom(void *ctx, uint8_t vc_payload_id,
+		  uint8_t stream_number, uint32_t *stream_id);
+int hdcp2_close_stream_qseecom(void *ctx, uint32_t stream_id);
+int hdcp2_update_app_data_qseecom(void *ctx, struct hdcp2_app_data *app_data);
+
+#endif /* __HDCP_QSEECOM_H__ */

+ 1103 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_smcinvoke.c

@@ -0,0 +1,1103 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "smcinvoke_object.h"
+#include <include/linux/IClientEnv.h>
+#include <include/smci/uid/CAppClient.h>
+#include <include/smci/uid/CAppLoader.h>
+#include <include/smci/interface/IAppClient.h>
+#include <include/smci/interface/IAppController.h>
+#include <include/smci/interface/IAppLoader.h>
+#include <include/smci/interface/IOpener.h>
+#include "hdcp_main.h"
+#include "hdcp_smcinvoke.h"
+#include "hdcp1.h"
+#include "hdcp1_ops.h"
+#include "hdcp2p2.h"
+
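+/* Ask the hdcp1 TA, via its smcinvoke object, to verify the provisioned key. */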
+static int hdcp1_verify_key(struct hdcp1_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", HDCP1_APP_NAME);
+		return -EINVAL;
+	}
+
+	ret = hdcp1_verify(handle->hdcp1_app_obj, 1);
+	if (ret)
+		pr_err("hdcp1_verify failed error:%d\n", ret);
+
+	return ret;
+}
+
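+/*
+ * Program the HDCP 1.x key through the smcinvoke hdcp1 object and read the
+ * AKSV back for validation.
+ */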
+static int hdcp1_key_set(struct hdcp1_smcinvoke_handle *handle,
+		 uint32_t *aksv_msb, uint32_t *aksv_lsb)
+{
+	int ret = 0;
+	uint8_t *ksvRes = NULL;
+	size_t ksvResLen = 0;
+
+	if (aksv_msb == NULL || aksv_lsb == NULL) {
+		pr_err("invalid aksv\n");
+		return -EINVAL;
+	}
+
+	if (!handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("hdcp1 app not loaded\n");
+		return -EINVAL;
+	}
+
+	ksvRes = kzalloc(HDCP1_AKSV_SIZE, GFP_KERNEL);
+	if (!ksvRes)
+		return -ENOMEM;
+
+	ret = hdcp1_set_key(handle->hdcp1_app_obj, ksvRes, HDCP1_AKSV_SIZE,
+				 &ksvResLen);
+	if (ret) {
+		pr_err("hdcp1_set_key failed ret=%d\n", ret);
+		ret = -ENOKEY;
+		goto end;
+	}
+
+	/* copy bytes into msb and lsb */
+	*aksv_msb = ksvRes[0] << 24 | ksvRes[1] << 16 | ksvRes[2] << 8 | ksvRes[3];
+	*aksv_lsb = ksvRes[4] << 24 | ksvRes[5] << 16 | ksvRes[6] << 8 | ksvRes[7];
+
+	ret = hdcp1_validate_aksv(*aksv_msb, *aksv_lsb);
+	if (ret)
+		pr_err("aksv validation failed (%d)\n", ret);
+
+end:
+	kfree(ksvRes);
+	return ret;
+}
+
+int load_app(char *app_name, struct Object *app_obj,
+			 struct Object *app_controller_obj)
+{
+	int ret = 0;
+	uint8_t *buffer = NULL;
+	struct qtee_shm shm = {0};
+	size_t size = 0;
+	struct Object client_env = {NULL, NULL};
+	struct Object app_loader = {NULL, NULL};
+
+	buffer = firmware_request_from_smcinvoke(app_name, &size, &shm);
+	if (buffer == NULL) {
+		pr_err("firmware_request_from_smcinvoke failed\n");
+		return -EINVAL;
+	}
+
+	ret = get_client_env_object(&client_env);
+	if (ret) {
+		pr_err("get_client_env_object failed :%d\n", ret);
+		client_env.invoke = NULL;
+		client_env.context = NULL;
+		goto error;
+	}
+
+	ret = IClientEnv_open(client_env, CAppLoader_UID, &app_loader);
+	if (ret) {
+		pr_err("IClientEnv_open failed :%d\n", ret);
+		app_loader.invoke = NULL;
+		app_loader.context = NULL;
+		goto error;
+	}
+
+	ret = IAppLoader_loadFromBuffer(app_loader, (const void *)buffer, size,
+			app_controller_obj);
+	if (ret) {
+		pr_err("IAppLoader_loadFromBuffer failed :%d\n", ret);
+		app_controller_obj->invoke = NULL;
+		app_controller_obj->context = NULL;
+		goto error;
+	}
+
+	ret = IAppController_getAppObject(*app_controller_obj, app_obj);
+	if (ret) {
+		pr_err("IAppController_getAppObject failed :%d\n", ret);
+		goto error;
+	}
+
+error:
+	qtee_shmbridge_free_shm(&shm);
+	Object_ASSIGN_NULL(app_loader);
+	Object_ASSIGN_NULL(client_env);
+	return ret;
+}
+
+static int hdcp1_app_load(struct hdcp1_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED)
+		goto error;
+
+	ret = load_app(HDCP1_APP_NAME, &(handle->hdcp1_app_obj),
+		   &(handle->hdcp1_appcontroller_obj));
+	if (ret) {
+		pr_err("hdcp1 TA load failed :%d\n", ret);
+		goto error;
+	}
+
+	if (Object_isNull(handle->hdcp1_app_obj)) {
+		pr_err("hdcp1_app_obj is NULL\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = load_app(HDCP1OPS_APP_NAME, &(handle->hdcp1ops_app_obj),
+		   &(handle->hdcp1ops_appcontroller_obj));
+	if (ret) {
+		pr_err("hdcp1ops TA load failed :%d\n", ret);
+		goto error;
+	}
+
+	if (Object_isNull(handle->hdcp1ops_app_obj)) {
+		pr_err("hdcp1ops_app_obj is NULL\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+
+error:
+	return ret;
+}
+
+static void hdcp1_app_unload(struct hdcp1_smcinvoke_handle *handle)
+{
+	if (!handle || !handle->hdcp1_app_obj.context) {
+		pr_err("invalid handle\n");
+		return;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_warn("hdcp1 app not loaded\n");
+		return;
+	}
+
+	Object_ASSIGN_NULL(handle->hdcp1_app_obj);
+	Object_ASSIGN_NULL(handle->hdcp1_appcontroller_obj);
+	Object_ASSIGN_NULL(handle->hdcp1ops_app_obj);
+	Object_ASSIGN_NULL(handle->hdcp1ops_appcontroller_obj);
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+	pr_debug("%s app unloaded\n", HDCP1_APP_NAME);
+}
+
+void *hdcp1_init_smcinvoke(void)
+{
+	struct hdcp1_smcinvoke_handle *handle =
+		kzalloc(sizeof(struct hdcp1_smcinvoke_handle), GFP_KERNEL);
+
+	if (!handle)
+		goto error;
+
+error:
+	return handle;
+}
+
+bool hdcp1_feature_supported_smcinvoke(void *data)
+{
+	int ret = 0;
+	bool supported = false;
+	struct hdcp1_smcinvoke_handle *handle = data;
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		goto error;
+	}
+
+	if (handle->feature_supported) {
+		supported = true;
+		goto error;
+	}
+
+	ret = hdcp1_app_load(handle);
+	if (!ret && (handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		if (!hdcp1_verify_key(handle)) {
+			pr_debug("HDCP 1.x supported\n");
+			handle->feature_supported = true;
+			supported = true;
+		}
+		hdcp1_app_unload(handle);
+	}
+error:
+	return supported;
+}
+
+int hdcp1_set_enc_smcinvoke(void *data, bool enable)
+{
+	int ret = 0;
+	struct hdcp1_smcinvoke_handle *handle = data;
+
+	if (!handle || !handle->hdcp1_app_obj.context) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return -EINVAL;
+	}
+
+	if (!handle->feature_supported) {
+		pr_err("HDCP 1.x not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", HDCP1_APP_NAME);
+		return -EINVAL;
+	}
+
+	ret = hdcp1_set_encryption(handle->hdcp1_app_obj, enable);
+	if (ret)
+		pr_err("hdcp1_set_encryption failed :%d\n", ret);
+
+	return ret;
+}
+
+int hdcp1_ops_notify_smcinvoke(void *data, void *topo, bool is_authenticated)
+{
+	int ret = 0;
+	struct hdcp1_smcinvoke_handle *handle = data;
+
+	if (!handle || !handle->hdcp1ops_app_obj.context) {
+		pr_err("invalid HDCP 1.x ops handle\n");
+		return -EINVAL;
+	}
+
+	if (!handle->feature_supported) {
+		pr_err("HDCP 1.x not supported\n");
+		return -EINVAL;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", HDCP1OPS_APP_NAME);
+		return -EINVAL;
+	}
+
+	ret = hdcp1_ops_notify_topology_change(handle->hdcp1ops_app_obj);
+	if (ret)
+		pr_err("hdcp1_ops_notify_topology_change failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+int hdcp1_start_smcinvoke(void *data, u32 *aksv_msb, u32 *aksv_lsb)
+{
+	int ret = 0;
+	struct hdcp1_smcinvoke_handle *handle = data;
+
+	if (!aksv_msb || !aksv_lsb) {
+		pr_err("invalid aksv output buffer\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!handle) {
+		pr_err("invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!handle->feature_supported) {
+		pr_err("feature not supported\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_debug("%s app already loaded\n", HDCP1_APP_NAME);
+		goto error;
+	}
+
+	ret = hdcp1_app_load(handle);
+	if (ret)
+		goto error;
+
+	ret = hdcp1_key_set(handle, aksv_msb, aksv_lsb);
+	if (ret)
+		goto key_error;
+
+	pr_debug("success\n");
+	return ret;
+
+key_error:
+	hdcp1_app_unload(handle);
+error:
+	return ret;
+}
+
+void hdcp1_stop_smcinvoke(void *data)
+{
+	struct hdcp1_smcinvoke_handle *hdcp1_handle = data;
+
+	if (!hdcp1_handle) {
+		pr_err("invalid HDCP 1.x handle\n");
+		return;
+	}
+
+	if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("hdcp1 app not loaded\n");
+		return;
+	}
+
+	Object_ASSIGN_NULL(hdcp1_handle->hdcp1_app_obj);
+	Object_ASSIGN_NULL(hdcp1_handle->hdcp1_appcontroller_obj);
+	Object_ASSIGN_NULL(hdcp1_handle->hdcp1ops_app_obj);
+	Object_ASSIGN_NULL(hdcp1_handle->hdcp1ops_appcontroller_obj);
+
+	hdcp1_handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+}
+
+void *hdcp2_init_smcinvoke(u32 device_type)
+{
+	struct hdcp2_smcinvoke_handle *handle =
+		kzalloc(sizeof(struct hdcp2_smcinvoke_handle), GFP_KERNEL);
+
+	if (!handle)
+		goto error;
+
+	handle->device_type = device_type;
+
+error:
+	return handle;
+}
+
+void hdcp2_deinit_smcinvoke(void *ctx)
+{
+	kfree_sensitive(ctx);
+}
+
+int hdcp_get_version(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+	uint32_t app_major_version = 0;
+	uint32_t appversion = 0;
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("hdcp2p2 TA already loaded\n");
+		goto error;
+	}
+
+	ret = hdcp2p2_version(handle->hdcp2_app_obj, &appversion);
+	if (ret) {
+		pr_err("hdcp2p2_version failed :%d\n", ret);
+		goto error;
+	}
+	app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(appversion);
+
+	pr_debug("hdp2p2 app major version %d, app version %d\n", app_major_version,
+			 appversion);
+error:
+	return ret;
+}
+
+int hdcp2_app_init(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+	uint32_t app_minor_version = 0;
+	uint32_t clientversion = 0;
+	uint32_t appversion = 0;
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("hdcp2p2 TA already loaded\n");
+		goto error;
+	}
+
+	clientversion = HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION,
+					HDCP_CLIENT_MINOR_VERSION,
+					HDCP_CLIENT_PATCH_VERSION);
+
+	ret = hdcp2p2_init(handle->hdcp2_app_obj, clientversion, &appversion);
+	if (ret) {
+		pr_err("hdcp2p2_init failed:%d\n", ret);
+		goto error;
+	}
+
+	app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(appversion);
+	if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) {
+		pr_err("client-app minor version mismatch app(%d), client(%d)\n",
+			   app_minor_version, HDCP_CLIENT_MINOR_VERSION);
+		ret = -1;
+		goto error;
+	}
+
+	pr_err("client version major(%d), minor(%d), patch(%d)\n",
+		   HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION,
+		   HDCP_CLIENT_PATCH_VERSION);
+
+	pr_err("app version major(%d), minor(%d), patch(%d)\n",
+		   HCDP_TXMTR_GET_MAJOR_VERSION(appversion),
+		   HCDP_TXMTR_GET_MINOR_VERSION(appversion),
+		   HCDP_TXMTR_GET_PATCH_VERSION(appversion));
+error:
+	return ret;
+}
+
+int hdcp2_app_tx_init(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+	uint32_t ctxhandle = 0;
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) {
+		pr_err("txmtr already initialized\n");
+		goto error;
+	}
+
+	ret = hdcp2p2_tx_init(handle->hdcp2_app_obj, handle->session_id, &ctxhandle);
+	if (ret) {
+		pr_err("hdcp2p2_tx_init failed :%d\n", ret);
+		goto error;
+	}
+
+	handle->tz_ctxhandle = ctxhandle;
+	handle->hdcp_state |= HDCP_STATE_TXMTR_INIT;
+
+error:
+	return ret;
+}
+
+int hdcp2_app_tx_deinit(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("hdcp2p2 TA not loaded\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_tx_deinit(handle->hdcp2_app_obj, handle->tz_ctxhandle);
+	if (ret) {
+		pr_err("hdcp2p2_tx_deinit failed :%d\n", ret);
+		goto error;
+	}
+	handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT;
+
+error:
+	return ret;
+}
+
+static int hdcp2_app_load(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_APP_LOADED) {
+		pr_err("hdcp2p2 TA already loaded\n");
+		goto error;
+	}
+
+	ret = load_app(HDCP2P2_APP_NAME, &(handle->hdcp2_app_obj),
+				   &(handle->hdcp2_appcontroller_obj));
+	if (ret) {
+		pr_err("hdcp2p2 TA load_app failed :%d\n", ret);
+		goto error;
+	}
+
+	if (Object_isNull(handle->hdcp2_app_obj)) {
+		pr_err("hdcp2p2 app object is NULL\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = load_app(HDCPSRM_APP_NAME, &(handle->hdcpsrm_app_obj),
+		   &(handle->hdcpsrm_appcontroller_obj));
+	if (ret == IAppLoader_ERROR_ALREADY_LOADED) {
+		pr_err("hdcpsrm TA already loaded\n");
+	} else if (ret) {
+		pr_err("hdcpsrm TA load failed :%d\n", ret);
+		goto error;
+	}
+
+	if (Object_isNull(handle->hdcpsrm_app_obj)) {
+		pr_err("hdcpsrm app object is NULL\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp_get_version(handle);
+	if (ret) {
+		pr_err("library get version failed\n");
+		goto error;
+	}
+
+	ret = hdcp2_app_init(handle);
+	if (ret) {
+		pr_err("app init failed\n");
+		goto error;
+	}
+
+	handle->hdcp_state |= HDCP_STATE_APP_LOADED;
+
+error:
+	return ret;
+}
+
+static int hdcp2_app_unload(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	ret = hdcp2p2_deinit(handle->hdcp2_app_obj);
+	if (ret) {
+		pr_err("hdcp2p2_deinit failed:%d\n", ret);
+		goto error;
+	}
+
+	Object_ASSIGN_NULL(handle->hdcp2_app_obj);
+	Object_ASSIGN_NULL(handle->hdcp2_appcontroller_obj);
+	Object_ASSIGN_NULL(handle->hdcpsrm_app_obj);
+	Object_ASSIGN_NULL(handle->hdcpsrm_appcontroller_obj);
+
+	handle->hdcp_state &= ~HDCP_STATE_APP_LOADED;
+
+error:
+	return ret;
+}
+
+static int hdcp2_verify_key(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("%s app not loaded\n", HDCP2P2_APP_NAME);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_verify_key(handle->hdcp2_app_obj);
+	if (ret) {
+		pr_err("hdcp2p2_verify_key failed:%d\n", ret);
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+static int hdcp2_app_session_init(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+	uint32_t sessionId = 0;
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("hdcp2p2 app not loaded\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) {
+		pr_err("session already initialized\n");
+		goto error;
+	}
+
+	if (Object_isNull(handle->hdcp2_app_obj)) {
+		pr_err("hdcp2_app_obj is NULL\n");
+		goto error;
+	}
+
+	ret = hdcp2p2_session_init(handle->hdcp2_app_obj, handle->device_type,
+			&sessionId);
+	if (ret) {
+		pr_err("hdcp2p2_session_init failed ret:%d\n", ret);
+		goto error;
+	}
+
+	handle->session_id = sessionId;
+	handle->hdcp_state |= HDCP_STATE_SESSION_INIT;
+error:
+	return ret;
+}
+
+static int hdcp2_app_session_deinit(struct hdcp2_smcinvoke_handle *handle)
+{
+	int ret = 0;
+
+	if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) {
+		pr_err("hdcp2p2 app not loaded\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_session_deinit(handle->hdcp2_app_obj, handle->session_id);
+	if (ret) {
+		pr_err("hdcp2p2_session_deinit failed:%d\n", ret);
+		goto error;
+	}
+
+	handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT;
+error:
+	return ret;
+}
+
+int hdcp2_app_start_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = (struct hdcp2_smcinvoke_handle *)ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.data = kmalloc(MAX_RX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!handle->app_data.request.data) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	handle->app_data.response.data = kmalloc(MAX_TX_MESSAGE_SIZE, GFP_KERNEL);
+	if (!handle->app_data.response.data) {
+		kfree(handle->app_data.request.data);
+		handle->app_data.request.data = NULL;
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = hdcp2_app_load(handle);
+	if (ret)
+		goto error;
+
+	ret = hdcp2_app_session_init(handle);
+	if (ret)
+		goto error;
+
+	ret = hdcp2_app_tx_init(handle);
+	if (ret)
+		goto error;
+
+error:
+	return ret;
+}
+
+int hdcp2_app_start_auth_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+	size_t resMsgOut = 0;
+	uint32_t timeout = 0;
+	uint32_t flag = 0;
+	uint32_t ctxhandle = 0;
+
+	uint8_t resMsg[MAX_TX_MESSAGE_SIZE] = {0};
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_start_auth(handle->hdcp2_app_obj, handle->tz_ctxhandle,
+	  resMsg, MAX_TX_MESSAGE_SIZE, &resMsgOut, &timeout,
+	  &flag, &ctxhandle);
+	if (ret) {
+		pr_err("hdcp2p2_start_auth failed :%d\n", ret);
+		goto error;
+	}
+
+	memcpy(handle->app_data.response.data, resMsg, resMsgOut);
+
+	handle->app_data.response.length = resMsgOut;
+	handle->app_data.timeout = timeout;
+	handle->app_data.repeater_flag = false;
+
+	handle->tz_ctxhandle = ctxhandle;
+
+error:
+	return ret;
+}
+
+int hdcp2_app_process_msg_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+	size_t resMsgLen = 0;
+	uint32_t timeout = 0;
+	uint32_t flag = 0;
+	uint32_t state = 0;
+
+	uint8_t resMsg[MAX_TX_MESSAGE_SIZE] = {0};
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	if (!handle->app_data.request.data) {
+		pr_err("invalid request buffer\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_rcvd_msg(
+		handle->hdcp2_app_obj, handle->app_data.request.data,
+		handle->app_data.request.length, handle->tz_ctxhandle, resMsg,
+		MAX_TX_MESSAGE_SIZE, &resMsgLen, &timeout, &flag, &state);
+	if (ret) {
+		pr_err("hdcp2p2_rcvd_msg failed :%d\n", ret);
+		goto error;
+	}
+
+	memcpy(handle->app_data.response.data, resMsg, resMsgLen);
+
+	/* check if it's a repeater */
+	if (flag == HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST)
+		handle->app_data.repeater_flag = true;
+
+	handle->app_data.response.length = resMsgLen;
+	handle->app_data.timeout = timeout;
+
+error:
+	return ret;
+}
+
+int hdcp2_app_timeout_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+	uint32_t timeout = 0;
+	size_t resMsgLenOut = 0;
+
+	uint8_t resMsg[MAX_TX_MESSAGE_SIZE] = {0};
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	ret = hdcp2p2_send_timeout(handle->hdcp2_app_obj, handle->tz_ctxhandle,
+		resMsg, MAX_TX_MESSAGE_SIZE, &resMsgLenOut,
+		&timeout);
+	if (ret) {
+		pr_err("hdcp2p2_send_timeout failed :%d\n", ret);
+		goto error;
+	}
+
+	memcpy(handle->app_data.response.data, resMsg, resMsgLenOut);
+
+	handle->app_data.response.length = resMsgLenOut;
+	handle->app_data.timeout = timeout;
+
+error:
+	return ret;
+}
+
+int hdcp2_app_enable_encryption_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	/*
+	 * wait at least 200ms before enabling encryption
+	 * as per hdcp2p2 specifications.
+	 */
+	msleep(SLEEP_SET_HW_KEY_MS);
+
+	ret = hdcp2p2_set_hw_key(handle->hdcp2_app_obj, handle->tz_ctxhandle);
+	if (ret) {
+		pr_err("hdcp2p2_set_hw_key failed:%d\n", ret);
+		goto error;
+	}
+
+	handle->hdcp_state |= HDCP_STATE_AUTHENTICATED;
+error:
+	return ret;
+}
+
+int hdcp2_app_query_stream_smcinvoke(void *ctx, uint32_t req_len)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	uint32_t timeout = 0;
+	size_t resMsgLenOut = 0;
+
+	uint8_t resMsg[MAX_TX_MESSAGE_SIZE] = {0};
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	handle->app_data.request.length = req_len;
+
+	ret = hdcp2p2_query_stream_type(
+		handle->hdcp2_app_obj, handle->tz_ctxhandle, resMsg,
+		MAX_TX_MESSAGE_SIZE, &resMsgLenOut, &timeout);
+	if (ret) {
+		pr_err("hdcp2p2_query_stream_type failed :%d\n", ret);
+		goto error;
+	}
+
+	memcpy(handle->app_data.response.data, resMsg, resMsgLenOut);
+
+	handle->app_data.response.length = resMsgLenOut;
+	handle->app_data.timeout = timeout;
+error:
+	return ret;
+}
+
+int hdcp2_app_stop_smcinvoke(void *ctx)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = hdcp2_app_tx_deinit(handle);
+	if (ret)
+		goto end;
+
+	ret = hdcp2_app_session_deinit(handle);
+	if (ret)
+		goto end;
+
+	ret = hdcp2_app_unload(handle);
+
+	kfree(handle->app_data.request.data);
+	kfree(handle->app_data.response.data);
+
+end:
+	return ret;
+}
+
+bool hdcp2_feature_supported_smcinvoke(void *ctx)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+	bool supported = false;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->feature_supported) {
+		supported = true;
+		goto error;
+	}
+
+	ret = hdcp2_app_load(handle);
+	if (!ret) {
+		if (!hdcp2_verify_key(handle)) {
+			pr_debug("HDCP 2.2 supported\n");
+			handle->feature_supported = true;
+			supported = true;
+		}
+		hdcp2_app_unload(handle);
+	}
+error:
+	return supported;
+}
+
+int hdcp2_force_encryption_smcinvoke(void *ctx, uint32_t enable)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (handle->hdcp_state & HDCP_STATE_AUTHENTICATED)
+		msleep(SLEEP_FORCE_ENCRYPTION_MS);
+
+	ret = hdcp2p2_force_encryption(handle->hdcp2_app_obj, handle->tz_ctxhandle,
+		enable);
+	if (ret) {
+		pr_err("hdcp2p2_force_encryption failed :%d\n", ret);
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+int hdcp2_open_stream_smcinvoke(void *ctx, uint8_t vc_payload_id,
+		uint8_t stream_number, uint32_t *stream_id)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+	uint32_t streamid = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_session_open_stream(handle->hdcp2_app_obj,
+		   handle->session_id, vc_payload_id,
+		   stream_number, 0, &streamid);
+	if (ret) {
+		pr_err("hdcp2p2_session_open_stream failed :%d\n", ret);
+		goto error;
+	}
+
+	*stream_id = streamid;
+
+error:
+	return ret;
+}
+
+int hdcp2_close_stream_smcinvoke(void *ctx, uint32_t stream_id)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = ctx;
+
+	if (!handle) {
+		pr_err("Invalid handle\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) {
+		pr_err("session not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) {
+		pr_err("txmtr not initialized\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = hdcp2p2_session_close_stream(handle->hdcp2_app_obj,
+		handle->session_id, stream_id);
+	if (ret) {
+		pr_err("hdcp2p2_session_close_stream failed :%d\n", ret);
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+int hdcp2_update_app_data_smcinvoke(void *ctx, struct hdcp2_app_data *app_data)
+{
+	struct hdcp2_smcinvoke_handle *handle = NULL;
+	int ret = 0;
+
+	handle = ctx;
+
+	if (!handle || !app_data) {
+		pr_err("invalid handle or app_data\n");
+		return -EINVAL;
+	}
+
+	app_data->request.data = handle->app_data.request.data;
+	app_data->request.length = handle->app_data.request.length;
+	app_data->response.data = handle->app_data.response.data;
+	app_data->response.length = handle->app_data.response.length;
+	app_data->timeout = handle->app_data.timeout;
+	app_data->repeater_flag = handle->app_data.repeater_flag;
+	return ret;
+}

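The smcinvoke entry points above are meant to be driven by a transport-owning caller (hdcp_main.c in this tree). As a hedged illustration of the intended call order only — the wrapper function, rx_buf and rx_len are hypothetical, rx_len must not exceed MAX_RX_MESSAGE_SIZE, and DEVICE_TYPE_DP comes from hdcp_qseecom.h — one message exchange could look like this:

/* Hypothetical sketch of the HDCP 2.x smcinvoke call sequence (not part of this commit). */
static int hdcp2_exchange_one_message(const uint8_t *rx_buf, uint32_t rx_len)
{
	struct hdcp2_app_data app_data = {0};
	void *ctx;
	int ret;

	ctx = hdcp2_init_smcinvoke(DEVICE_TYPE_DP);
	if (!ctx)
		return -ENOMEM;

	/* Loads the hdcp2p2/hdcpsrm TAs, opens a session and a transmitter context. */
	ret = hdcp2_app_start_smcinvoke(ctx, 0);
	if (ret)
		goto deinit;

	/* Expose the handle's request/response buffers to the caller. */
	ret = hdcp2_update_app_data_smcinvoke(ctx, &app_data);
	if (ret)
		goto stop;

	/* Feed one message received from the sink into the TA ... */
	memcpy(app_data.request.data, rx_buf, rx_len);
	ret = hdcp2_app_process_msg_smcinvoke(ctx, rx_len);
	if (ret)
		goto stop;

	/* ... and read back the TA's reply for transmission to the sink. */
	ret = hdcp2_update_app_data_smcinvoke(ctx, &app_data);
	/* app_data.response.data / app_data.response.length now hold the reply. */

stop:
	hdcp2_app_stop_smcinvoke(ctx);
deinit:
	hdcp2_deinit_smcinvoke(ctx);
	return ret;
}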
+ 62 - 0
qcom/opensource/securemsm-kernel/hdcp/hdcp_smcinvoke.h

@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __HDCP_SMCINVOKE_H__
+#define __HDCP_SMCINVOKE_H__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <include/linux/smcinvoke_object.h>
+
+#include "hdcp_main.h"
+
+struct hdcp1_smcinvoke_handle {
+	struct Object hdcp1_app_obj;
+	struct Object hdcp1_appcontroller_obj;
+	struct Object hdcp1ops_app_obj;
+	struct Object hdcp1ops_appcontroller_obj;
+	bool feature_supported;
+	uint32_t device_type;
+	enum hdcp_state hdcp_state;
+};
+
+struct hdcp2_smcinvoke_handle {
+	struct hdcp2_app_data app_data;
+	uint32_t tz_ctxhandle;
+	bool feature_supported;
+	enum hdcp_state hdcp_state;
+	struct Object hdcp2_app_obj;
+	struct Object hdcp2_appcontroller_obj;
+	struct Object hdcpsrm_app_obj;
+	struct Object hdcpsrm_appcontroller_obj;
+	uint32_t session_id;
+	uint32_t device_type;
+};
+
+void *hdcp1_init_smcinvoke(void);
+bool hdcp1_feature_supported_smcinvoke(void *data);
+int hdcp1_set_enc_smcinvoke(void *data, bool enable);
+int hdcp1_ops_notify_smcinvoke(void *data, void *topo, bool is_authenticated);
+int hdcp1_start_smcinvoke(void *data, u32 *aksv_msb, u32 *aksv_lsb);
+void hdcp1_stop_smcinvoke(void *data);
+
+void *hdcp2_init_smcinvoke(u32 device_type);
+void hdcp2_deinit_smcinvoke(void *ctx);
+int hdcp2_app_start_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_start_auth_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_process_msg_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_timeout_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_enable_encryption_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_query_stream_smcinvoke(void *ctx, uint32_t req_len);
+int hdcp2_app_stop_smcinvoke(void *ctx);
+bool hdcp2_feature_supported_smcinvoke(void *ctx);
+int hdcp2_force_encryption_smcinvoke(void *ctx, uint32_t enable);
+int hdcp2_open_stream_smcinvoke(void *ctx, uint8_t vc_payload_id,
+		uint8_t stream_number, uint32_t *stream_id);
+int hdcp2_close_stream_smcinvoke(void *ctx, uint32_t stream_id);
+int hdcp2_update_app_data_smcinvoke(void *ctx, struct hdcp2_app_data *app_data);
+
+#endif /* __HDCP_SMCINVOKE_H__ */

+ 10 - 0
qcom/opensource/securemsm-kernel/include/linux/CTrustedCameraDriver.h

@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "smcinvoke_object.h"
+
+#define CTrustedCameraDriver_UID 283
+
+

+ 159 - 0
qcom/opensource/securemsm-kernel/include/linux/IClientEnv.h

@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ *
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define IClientEnv_OP_open 0
+#define IClientEnv_OP_registerLegacy 1
+#define IClientEnv_OP_register 2
+#define IClientEnv_OP_registerWithWhitelist 3
+#define IClientEnv_OP_notifyDomainChange 4
+#define IClientEnv_OP_registerWithCredentials 5
+#define IClientEnv_OP_loadCmnlibFromBuffer 6
+#define IClientEnv_OP_configTaRegion 7
+#define IClientEnv_OP_adciAccept 8
+#define IClientEnv_OP_adciShutdown 9
+
+#include "smcinvoke_object.h"
+
+static inline int32_t
+IClientEnv_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IClientEnv_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IClientEnv_open(struct Object self, uint32_t uid_val, struct Object *obj_ptr)
+{
+	union ObjectArg a[2];
+	int32_t result;
+
+	a[0].b = (struct ObjectBuf) { &uid_val, sizeof(uint32_t) };
+
+	result = Object_invoke(self, IClientEnv_OP_open, a, ObjectCounts_pack(1, 0, 0, 1));
+
+	*obj_ptr = a[1].o;
+
+	return result;
+}
+
+static inline int32_t
+IClientEnv_registerLegacy(struct Object self, const void *credentials_ptr, size_t credentials_len,
+			struct Object *clientEnv_ptr)
+{
+	union ObjectArg a[2];
+	int32_t result;
+
+	a[0].bi = (struct ObjectBufIn) { credentials_ptr, credentials_len * 1 };
+
+	result = Object_invoke(self, IClientEnv_OP_registerLegacy, a,
+			ObjectCounts_pack(1, 0, 0, 1));
+
+	*clientEnv_ptr = a[1].o;
+
+	return result;
+}
+
+static inline int32_t
+IClientEnv_register(struct Object self, struct Object credentials_val,
+			struct Object *clientEnv_ptr)
+{
+	union ObjectArg a[2];
+	int32_t result;
+
+	a[0].o = credentials_val;
+
+	result = Object_invoke(self, IClientEnv_OP_register, a,
+			ObjectCounts_pack(0, 0, 1, 1));
+
+	*clientEnv_ptr = a[1].o;
+
+	return result;
+}
+
+static inline int32_t
+IClientEnv_registerWithWhitelist(struct Object self,
+		struct Object credentials_val, const uint32_t *uids_ptr,
+		size_t uids_len, struct Object *clientEnv_ptr)
+{
+	union ObjectArg a[3];
+	int32_t result;
+
+	a[1].o = credentials_val;
+	a[0].bi = (struct ObjectBufIn) { uids_ptr, uids_len *
+					sizeof(uint32_t) };
+
+	result = Object_invoke(self, IClientEnv_OP_registerWithWhitelist, a,
+			ObjectCounts_pack(1, 0, 1, 1));
+
+	*clientEnv_ptr = a[2].o;
+
+	return result;
+}
+
+static inline int32_t
+IClientEnv_notifyDomainChange(struct Object self)
+{
+	return Object_invoke(self, IClientEnv_OP_notifyDomainChange, 0, 0);
+}
+
+static inline int32_t
+IClientEnv_registerWithCredentials(struct Object self, struct Object
+		credentials_val, struct Object *clientEnv_ptr)
+{
+	union ObjectArg a[2]={{{0,0}}};
+	int32_t result;
+
+	a[0].o = credentials_val;
+
+	result = Object_invoke(self, IClientEnv_OP_registerWithCredentials, a,
+			ObjectCounts_pack(0, 0, 1, 1));
+
+	*clientEnv_ptr = a[1].o;
+
+	return result;
+}
+
+static inline int32_t
+IClientEnv_loadCmnlibFromBuffer(struct Object self, const void *cmnlibElf_ptr, size_t cmnlibElf_len)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].bi = (struct ObjectBufIn) { cmnlibElf_ptr, cmnlibElf_len * 1 };
+
+	return Object_invoke(self, IClientEnv_OP_loadCmnlibFromBuffer, a, ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t
+IClientEnv_configTaRegion(struct Object self, uint64_t appRgnAddr_val, uint32_t appRgnSize_val)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	struct {
+		uint64_t m_appRgnAddr;
+		uint32_t m_appRgnSize;
+	} i;
+
+	a[0].b = (struct ObjectBuf) { &i, 12 };
+	i.m_appRgnAddr = appRgnAddr_val;
+	i.m_appRgnSize = appRgnSize_val;
+
+	return Object_invoke(self, IClientEnv_OP_configTaRegion, a, ObjectCounts_pack(1, 0, 0, 0));
+}
+
+static inline int32_t
+IClientEnv_adciAccept(struct Object self)
+{
+	return Object_invoke(self, IClientEnv_OP_adciAccept, 0, 0);
+}
+
+static inline int32_t
+IClientEnv_adciShutdown(struct Object self)
+{
+	return Object_invoke(self, IClientEnv_OP_adciShutdown, 0, 0);
+}

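A kernel client obtains TZ service objects through the client environment, exactly as load_app() does in hdcp_smcinvoke.c above. A minimal sketch, assuming the caller links against the smcinvoke driver exports; the wrapper name is hypothetical:

/* Minimal sketch: open a TZ service object by UID via the client environment. */
static int open_service_by_uid(uint32_t uid, struct Object *service)
{
	struct Object client_env = Object_NULL;
	int ret;

	ret = get_client_env_object(&client_env);
	if (ret)
		return ret;

	ret = IClientEnv_open(client_env, uid, service);

	/* The client env is no longer needed once the service object is held. */
	Object_ASSIGN_NULL(client_env);
	return ret;
}

A caller would pass, for example, CTrustedCameraDriver_UID from CTrustedCameraDriver.h above as the uid argument.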
+ 130 - 0
qcom/opensource/securemsm-kernel/include/linux/ITrustedCameraDriver.h

@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "smcinvoke_object.h"
+
+/**
+ * Struct containing values for programming of domain ID
+ *
+ * @version:            Version info
+ * @protect:            To protect or reset the lanes
+ * @csid_hw_idx_mask:   Bit position denoting CSID in use
+ * @cdm_hw_idx_mask:    Bit position denoting CDM in use
+ * @vc_mask:            VC mask for identifying domain
+ * @phy_lane_sel_mask:  PHY lane info - contains CPHY, DPHY and PHY ID values
+ *                      0-15 bits -- PHY index
+ *                      16-23 bits -- CPHY lanes
+ *                      24-31 bits -- DPHY lanes
+ * @reserved:           Reserved field
+ */
+
+typedef struct {
+	uint32_t version;
+	uint32_t protect;
+	uint32_t csid_hw_idx_mask;
+	uint32_t cdm_hw_idx_mask;
+	uint64_t vc_mask;
+	uint64_t phy_lane_sel_mask;
+	uint64_t reserved;
+} ITCDriverSensorInfo;
+
+#define ITrustedCameraDriver_ERROR_NOT_ALLOWED 10
+
+#define ITrustedCameraDriver_OP_dynamicProtectSensor 0
+#define ITrustedCameraDriver_OP_getVersion 1
+#define ITrustedCameraDriver_OP_dynamicConfigureFDPort 3
+
+static inline int32_t
+ITrustedCameraDriver_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+ITrustedCameraDriver_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+/*
+ * Description: This method allows protecting a camera sensor based on the sensor
+ *              information provided.
+ *
+ * In:          this - ITrustedCameraDriver object
+ * In:          phy_info_ptr - Camera HW settings required for securing the usecase
+ * Out:         void
+ * Return:      Object_OK on success
+ *              secure camera error codes from seccam_def on failure
+ */
+
+static inline int32_t
+ITrustedCameraDriver_dynamicProtectSensor(struct Object self,
+		const ITCDriverSensorInfo *phy_info_ptr)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].bi = (struct ObjectBufIn) { phy_info_ptr, sizeof(ITCDriverSensorInfo) };
+
+	return Object_invoke(self, ITrustedCameraDriver_OP_dynamicProtectSensor, a,
+			ObjectCounts_pack(1, 0, 0, 0));
+}
+
+/*
+ * Description: Get the current version info
+ *
+ * In:         this - ITrustedCameraDriver object
+ * Out:        arch_ver_ptr - the pointer of arch version number.
+ * Out:        max_ver_ptr -  the pointer of the second part of the version number
+ * Out:        min_ver_ptr -  the pointer of the third part of the version number
+ * Return:     Object_OK on success
+ */
+
+static inline int32_t
+ITrustedCameraDriver_getVersion(struct Object self, uint32_t *arch_ver_ptr,
+		uint32_t *max_ver_ptr, uint32_t *min_ver_ptr)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+	int32_t result;
+	struct {
+		uint32_t m_arch_ver;
+		uint32_t m_max_ver;
+		uint32_t m_min_ver;
+	} o = {0};
+
+	a[0].b = (struct ObjectBuf) { &o, 12 };
+
+	result = Object_invoke(self, ITrustedCameraDriver_OP_getVersion, a,
+			ObjectCounts_pack(0, 1, 0, 0));
+
+	*arch_ver_ptr = o.m_arch_ver;
+	*max_ver_ptr = o.m_max_ver;
+	*min_ver_ptr = o.m_min_ver;
+
+	return result;
+}
+
+/*
+ * Description: Dynamic configuration to allow secure/non-secure FD port
+ *              on all the CSIDs
+ *
+ * In:          this - ITrustedCameraDriver object
+ * In:          protect - to secure or non-secure the port
+ * Out:         void
+ * Return:      Object_OK on success
+ *              Object_ERROR on failure
+ *              ITrustedCameraDriver_ERROR_NOT_ALLOWED on request to
+ *              configure FD port even when disabled by OEM
+ */
+
+static inline int32_t
+ITrustedCameraDriver_dynamicConfigureFDPort(struct Object self, uint32_t protect)
+{
+	union ObjectArg a[1] = {{{0, 0}}};
+
+	a[0].b = (struct ObjectBuf) { &protect, sizeof(uint32_t) };
+
+	return Object_invoke(self, ITrustedCameraDriver_OP_dynamicConfigureFDPort, a,
+			ObjectCounts_pack(1, 0, 0, 0));
+}

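The ITCDriverSensorInfo documentation above maps directly onto dynamicProtectSensor. A hedged sketch with placeholder values follows; the bit assignments track the comment block, while the meaning of protect = 1 and the version value are assumptions, not a real sensor configuration:

/* Hypothetical sketch: protect one CSID/CDM pair on PHY 0 (all values are placeholders). */
static int32_t tcam_protect_example(struct Object tcam)
{
	ITCDriverSensorInfo info = {0};

	info.version = 1;			/* placeholder version */
	info.protect = 1;			/* assumed: 1 = protect, 0 = reset the lanes */
	info.csid_hw_idx_mask = 1 << 0;		/* CSID 0 in use */
	info.cdm_hw_idx_mask = 1 << 0;		/* CDM 0 in use */
	info.vc_mask = 0x1;			/* VC 0 identifies the domain */
	/* bits 0-15: PHY index, 16-23: CPHY lanes, 24-31: DPHY lanes */
	info.phy_lane_sel_mask = (0x3ULL << 24) | 0;	/* two DPHY lanes on PHY 0 */

	return ITrustedCameraDriver_dynamicProtectSensor(tcam, &info);
}

The object passed as tcam would be opened with IClientEnv_open() and CTrustedCameraDriver_UID, as in the sketch after IClientEnv.h above.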
+ 108 - 0
qcom/opensource/securemsm-kernel/include/linux/smci_clientenv.h

@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_CLIENTENV_H
+#define __SMCI_CLIENTENV_H
+
+#include "smci_object.h"
+#include "IClientEnv.h"
+
+#define SMCI_CLIENTENV_OP_OPEN 0
+#define SMCI_CLIENTENV_OP_REGISTERLEGACY 1
+#define SMCI_CLIENTENV_OP_REGISTER 2
+#define SMCI_CLIENTENV_OP_REGISTERWITHWHITELIST 3
+#define SMCI_CLIENTENV_OP_NOTIFYDOMAINCHANGE 4
+#define SMCI_CLIENTENV_OP_REGISTERWITHCREDENTIALS 5
+#define SMCI_CLIENTENV_OP_LOADCMNLIBFROMBUFFER 6
+#define SMCI_CLIENTENV_OP_CONFIGTAREGION 7
+#define SMCI_CLIENTENV_OP_ADCIACCEPT 8
+#define SMCI_CLIENTENV_OP_ADCISHUTDOWN 9
+
+static inline int32_t
+smci_clientenv_release(struct smci_object self)
+{
+	return IClientEnv_release(self);
+}
+
+static inline int32_t
+smci_clientenv_retain(struct smci_object self)
+{
+	return IClientEnv_retain(self);
+}
+
+static inline int32_t
+smci_clientenv_open(struct smci_object self, uint32_t uid_val, struct smci_object *obj_ptr)
+{
+	return IClientEnv_open(self, uid_val, obj_ptr);
+}
+
+static inline int32_t
+smci_clientenv_registerlegacy(struct smci_object self, const void *credentials_ptr,
+		size_t credentials_len, struct smci_object *clientenv_ptr)
+{
+	return IClientEnv_registerLegacy(self, credentials_ptr,
+		credentials_len, clientenv_ptr);
+}
+
+static inline int32_t
+smci_clientenv_register(struct smci_object self, struct smci_object credentials_val,
+			struct smci_object *clientenv_ptr)
+{
+	return IClientEnv_register(self, credentials_val,
+			clientenv_ptr);
+}
+
+static inline int32_t
+smci_clientenv_registerwithwhitelist(struct smci_object self,
+		struct smci_object credentials_val, const uint32_t *uids_ptr,
+		size_t uids_len, struct smci_object *clientenv_ptr)
+{
+	return IClientEnv_registerWithWhitelist(self,
+		credentials_val, uids_ptr,
+		uids_len, clientenv_ptr);
+}
+
+static inline int32_t
+smci_clientenv_notifydomainchange(struct smci_object self)
+{
+	return IClientEnv_notifyDomainChange(self);
+}
+
+static inline int32_t
+smci_clientenv_registerwithcredentials(struct smci_object self, struct smci_object
+		credentials_val, struct smci_object *clientenv_ptr)
+{
+	return IClientEnv_registerWithCredentials(self,
+		credentials_val, clientenv_ptr);
+}
+
+static inline int32_t
+smci_clientenv_loadcmnlibfrombuffer(struct smci_object self, const void *cmnlibelf_ptr,
+		size_t cmnlibelf_len)
+{
+	return IClientEnv_loadCmnlibFromBuffer(self, cmnlibelf_ptr, cmnlibelf_len);
+}
+
+static inline int32_t
+smci_clientenv_configtaregion(struct smci_object self, uint64_t apprgnaddr_val,
+		uint32_t apprgnsize_val)
+
+{
+	return IClientEnv_configTaRegion(self, apprgnaddr_val, apprgnsize_val);
+}
+
+static inline int32_t
+smci_clientenv_adciaccept(struct smci_object self)
+{
+	return IClientEnv_adciAccept(self);
+}
+
+static inline int32_t
+smci_clientenv_adcishutdown(struct smci_object self)
+{
+	return IClientEnv_adciShutdown(self);
+}
+
+#endif /* __SMCI_CLIENTENV_H */

+ 151 - 0
qcom/opensource/securemsm-kernel/include/linux/smci_object.h

@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_OBJECT_H
+#define __SMCI_OBJECT_H
+
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/qtee_shmbridge.h>
+#include "smcinvoke.h"
+#include "smcinvoke_object.h"
+
+/*
+ * Method bits are not modified by transport layers.  These describe the
+ * method (member function) being requested by the client.
+ */
+
+#define SMCI_OBJECT_OP_METHOD_MASK     (0x0000FFFFu)
+#define SMCI_OBJECT_OP_METHODID(op)    ((op) & SMCI_OBJECT_OP_METHOD_MASK)
+#define SMCI_OBJECT_OP_RELEASE       (SMCI_OBJECT_OP_METHOD_MASK - 0)
+#define SMCI_OBJECT_OP_RETAIN        (SMCI_OBJECT_OP_METHOD_MASK - 1)
+#define SMCI_OBJECT_OP_MAP_REGION    0
+#define SMCI_OBJECT_OP_YIELD 1
+#define SMCI_OBJECT_OP_SLEEP 2
+
+#define SMCI_OBJECT_COUNTS_MAX_BI   0xF
+#define SMCI_OBJECT_COUNTS_MAX_BO   0xF
+#define SMCI_OBJECT_COUNTS_MAX_OI   0xF
+#define SMCI_OBJECT_COUNTS_MAX_OO   0xF
+
+/* unpack counts */
+
+#define SMCI_OBJECT_COUNTS_NUM_BI(k)  ((size_t) (((k) >> 0) & SMCI_OBJECT_COUNTS_MAX_BI))
+#define SMCI_OBJECT_COUNTS_NUM_BO(k)  ((size_t) (((k) >> 4) & SMCI_OBJECT_COUNTS_MAX_BO))
+#define SMCI_OBJECT_COUNTS_NUM_OI(k)  ((size_t) (((k) >> 8) & SMCI_OBJECT_COUNTS_MAX_OI))
+#define SMCI_OBJECT_COUNTS_NUM_OO(k)  ((size_t) (((k) >> 12) & SMCI_OBJECT_COUNTS_MAX_OO))
+#define SMCI_OBJECT_COUNTS_NUM_BUFFERS(k)	\
+			(SMCI_OBJECT_COUNTS_NUM_BI(k) + SMCI_OBJECT_COUNTS_NUM_BO(k))
+
+#define SMCI_OBJECT_COUNTS_NUM_OBJECTS(k)	\
+			(SMCI_OBJECT_COUNTS_NUM_OI(k) + SMCI_OBJECT_COUNTS_NUM_OO(k))
+
+/* Indices into args[] */
+
+#define SMCI_OBJECT_COUNTS_INDEX_BI(k)   0
+#define SMCI_OBJECT_COUNTS_INDEX_BO(k)		\
+			(SMCI_OBJECT_COUNTS_INDEX_BI(k) + SMCI_OBJECT_COUNTS_NUM_BI(k))
+#define SMCI_OBJECT_COUNTS_INDEX_OI(k)		\
+			(SMCI_OBJECT_COUNTS_INDEX_BO(k) + SMCI_OBJECT_COUNTS_NUM_BO(k))
+#define SMCI_OBJECT_COUNTS_INDEX_OO(k)		\
+			(SMCI_OBJECT_COUNTS_INDEX_OI(k) + SMCI_OBJECT_COUNTS_NUM_OI(k))
+#define SMCI_OBJECT_COUNTS_TOTAL(k)		\
+			(SMCI_OBJECT_COUNTS_INDEX_OO(k) + SMCI_OBJECT_COUNTS_NUM_OO(k))
+
+#define SMCI_OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
+	((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
+			((in_objs) << 8) | ((out_objs) << 12)))
+
+#define SMCI_OBJECT_COUNTS_INDEX_BUFFERS(k)   SMCI_OBJECT_COUNTS_INDEX_BI(k)
+
+/* Object_invoke return codes */
+
+#define SMCI_OBJECT_IS_OK(err)        ((err) == 0)
+#define SMCI_OBJECT_IS_ERROR(err)     ((err) != 0)
+
+/* Generic error codes */
+
+#define SMCI_OBJECT_OK                  0   /* non-specific success code */
+#define SMCI_OBJECT_ERROR               1   /* non-specific error */
+#define SMCI_OBJECT_ERROR_INVALID       2   /* unsupported/unrecognized request */
+#define SMCI_OBJECT_ERROR_SIZE_IN       3   /* supplied buffer/string too large */
+#define SMCI_OBJECT_ERROR_SIZE_OUT      4   /* supplied output buffer too small */
+
+#define SMCI_OBJECT_ERROR_USERBASE     10   /* start of user-defined error range */
+
+/* Transport layer error codes */
+
+#define SMCI_OBJECT_ERROR_DEFUNCT     -90   /* smci_object no longer exists */
+#define SMCI_OBJECT_ERROR_ABORT       -91   /* calling thread must exit */
+#define SMCI_OBJECT_ERROR_BADOBJ      -92   /* invalid smci_object context */
+#define SMCI_OBJECT_ERROR_NOSLOTS     -93   /* caller's smci_object table full */
+#define SMCI_OBJECT_ERROR_MAXARGS     -94   /* too many args */
+#define SMCI_OBJECT_ERROR_MAXDATA     -95   /* buffers too large */
+#define SMCI_OBJECT_ERROR_UNAVAIL     -96   /* the request could not be processed */
+#define SMCI_OBJECT_ERROR_KMEM        -97   /* kernel out of memory */
+#define SMCI_OBJECT_ERROR_REMOTE      -98   /* local method sent to remote smci_object */
+#define SMCI_OBJECT_ERROR_BUSY        -99   /* smci_object is busy */
+#define SMCI_OBJECT_ERROR_TIMEOUT     -103  /* Call Back smci_object invocation timed out. */
+
+/* smci_objectop */
+
+#define SMCI_OBJECT_OP_LOCAL	((uint32_t) 0x00008000U)
+
+#define SMCI_OBJECT_OP_IS_LOCAL(op)	(((op) & SMCI_OBJECT_OP_LOCAL) != 0)
+
+
+/* smci_object */
+
+#define SMCI_OBJECTCOUNTS_PACK(nbuffersin, nbuffersout, nobjectsin, nobjectsout) \
+	((uint32_t) ((nbuffersin) |	\
+	((nbuffersout) << 4) |			\
+	((nobjectsin) << 8)  |			\
+	((nobjectsout) << 12)))
+
+#define smci_object_arg ObjectArg
+#define smci_objectinvoke ObjectInvoke
+#define smci_object Object
+#define smci_object_buf ObjectBuf
+#define smci_object_buf_in ObjectBufIn
+
+static inline int32_t smci_object_invoke(struct smci_object o, uint32_t op,
+				union smci_object_arg *args, uint32_t k)
+{
+	return Object_invoke(o, op, args, k);
+}
+
+#define SMCI_OBJECT_NULL		((struct smci_object){NULL, NULL})
+
+
+#define SMCI_OBJECT_NOT_RETAINED
+
+#define SMCI_OBJECT_CONSUMED
+
+static inline int32_t smci_object_release(SMCI_OBJECT_CONSUMED struct smci_object o)
+{
+	return Object_release(o);
+}
+static inline int32_t smci_object_retain(struct smci_object o)
+{
+	return Object_retain(o);
+}
+
+#define SMCI_OBJECT_IS_NULL(o)	((o).invoke == NULL)
+
+#define SMCI_OBJECT_RELEASE_IF(o)				\
+	do {						\
+		struct smci_object o_ = (o);			\
+		if (!SMCI_OBJECT_IS_NULL(o_))			\
+			(void) smci_object_release(o_);	\
+	} while (0)
+
+static inline void smci_object_replace(struct smci_object *loc, struct smci_object obj_new)
+{
+	Object_replace(loc, obj_new);
+}
+
+#define SMCI_OBJECT_ASSIGN_NULL(loc)  smci_object_replace(&(loc), SMCI_OBJECT_NULL)
+
+#endif /* __SMCI_OBJECT_H */

+ 110 - 0
qcom/opensource/securemsm-kernel/include/linux/smcinvoke.h

@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _UAPI_SMCINVOKE_H_
+#define _UAPI_SMCINVOKE_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCINVOKE_USERSPACE_OBJ_NULL	-1
+#define DEFAULT_CB_OBJ_THREAD_CNT	4
+#define SMCINVOKE_TZ_MIN_BUF_SIZE	4096
+
+struct smcinvoke_buf {
+	__u64 addr;
+	__u64 size;
+};
+
+struct smcinvoke_obj {
+	__s64 fd;
+	__s64 cb_server_fd;
+	__s64 reserved;
+};
+
+union smcinvoke_arg {
+	struct smcinvoke_buf b;
+	struct smcinvoke_obj o;
+};
+
+/*
+ * struct smcinvoke_cmd_req: This structure is sent transparently to the TEE
+ * @op: Operation to be performed
+ * @counts: Number of arguments passed
+ * @result: Result of the invoke operation
+ * @argsize: Size of each argument
+ * @args: Pointer to a buffer holding all arguments
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0
+ */
+struct smcinvoke_cmd_req {
+	__u32 op;
+	__u32 counts;
+	__s32 result;
+	__u32 argsize;
+	__u64 args;
+	__s64 reserved;
+};
+
+/*
+ * struct smcinvoke_accept: structure to process CB req from TEE
+ * @has_resp: IN: Whether IOCTL is carrying response data
+ * @result: IN: Outcome of operation op
+ * @op: OUT: Operation to be performed on target object
+ * @counts: OUT: Number of arguments, embedded in buffer pointed by
+ *               buf_addr, to complete operation
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0.
+ * @argsize: IN: Size of any argument, all of equal size, embedded
+ *               in buffer pointed by buf_addr
+ * @txn_id: OUT: An id that should be passed as it is for response
+ * @cbobj_id: OUT: Callback object which is target of operation op
+ * @buf_len: IN: Len of buffer pointed by buf_addr
+ * @buf_addr: IN: Buffer containing all arguments which are needed
+ *                to complete operation op
+ */
+struct smcinvoke_accept {
+	__u32 has_resp;
+	__s32 result;
+	__u32 op;
+	__u32 counts;
+	__s32 reserved;
+	__u32 argsize;
+	__u64 txn_id;
+	__s64 cbobj_id;
+	__u64 buf_len;
+	__u64 buf_addr;
+};
+
+/*
+ * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0
+ */
+struct smcinvoke_server {
+	__u64 cb_buf_size;
+	__s64 reserved;
+};
+
+#define SMCINVOKE_IOC_MAGIC    0x98
+
+#define SMCINVOKE_IOCTL_INVOKE_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
+
+#define SMCINVOKE_IOCTL_ACCEPT_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
+
+#define SMCINVOKE_IOCTL_SERVER_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
+
+#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s64)
+
+/*
+ * The smcinvoke logging buffer carries additional debug information from the
+ * client to the smcinvoke driver, to be included in the driver's log (if any).
+ */
+#define SMCINVOKE_LOG_BUF_SIZE 100
+#define SMCINVOKE_IOCTL_LOG \
+	_IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE)
+
+#endif /* _UAPI_SMCINVOKE_H_ */

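These ioctls are the user-space entry points. Below is a hedged sketch of a single invocation carrying one input buffer; obj_fd is assumed to already reference the target object (how it is obtained is outside this header), the local include path is illustrative, and the nibble packing of counts (in-bufs | out-bufs << 4 | in-objs << 8 | out-objs << 12) is inferred from the kernel-side ObjectCounts_pack() helper rather than guaranteed here:

/* Hypothetical user-space sketch of SMCINVOKE_IOCTL_INVOKE_REQ with one input buffer. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "smcinvoke.h"	/* local copy of this UAPI header */

static int invoke_with_one_in_buf(int obj_fd, uint32_t op, void *buf, uint64_t len)
{
	struct smcinvoke_cmd_req req;
	union smcinvoke_arg args[1];
	int ret;

	memset(&req, 0, sizeof(req));
	memset(args, 0, sizeof(args));

	args[0].b.addr = (uint64_t)(uintptr_t)buf;
	args[0].b.size = len;

	req.op = op;
	req.counts = 1;		/* one input buffer, no outputs/objects (assumed packing) */
	req.argsize = sizeof(union smcinvoke_arg);
	req.args = (uint64_t)(uintptr_t)args;

	ret = ioctl(obj_fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
	return ret ? ret : req.result;	/* req.result carries the object-level status */
}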
+ 202 - 0
qcom/opensource/securemsm-kernel/include/linux/smcinvoke_object.h

@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __SMCINVOKE_OBJECT_H
+#define __SMCINVOKE_OBJECT_H
+
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/qtee_shmbridge.h>
+#include "smcinvoke.h"
+
+/*
+ * Method bits are not modified by transport layers.  These describe the
+ * method (member function) being requested by the client.
+ */
+
+#define OBJECT_OP_METHOD_MASK     (0x0000FFFFu)
+#define OBJECT_OP_METHODID(op)    ((op) & OBJECT_OP_METHOD_MASK)
+#define OBJECT_OP_RELEASE       (OBJECT_OP_METHOD_MASK - 0)
+#define OBJECT_OP_RETAIN        (OBJECT_OP_METHOD_MASK - 1)
+#define OBJECT_OP_MAP_REGION    0
+#define OBJECT_OP_YIELD 1
+#define OBJECT_OP_SLEEP 2
+
+#define OBJECT_COUNTS_MAX_BI   0xF
+#define OBJECT_COUNTS_MAX_BO   0xF
+#define OBJECT_COUNTS_MAX_OI   0xF
+#define OBJECT_COUNTS_MAX_OO   0xF
+
+/* unpack counts */
+
+#define OBJECT_COUNTS_NUM_BI(k)  ((size_t) (((k) >> 0) & OBJECT_COUNTS_MAX_BI))
+#define OBJECT_COUNTS_NUM_BO(k)  ((size_t) (((k) >> 4) & OBJECT_COUNTS_MAX_BO))
+#define OBJECT_COUNTS_NUM_OI(k)  ((size_t) (((k) >> 8) & OBJECT_COUNTS_MAX_OI))
+#define OBJECT_COUNTS_NUM_OO(k)  ((size_t) (((k) >> 12) & OBJECT_COUNTS_MAX_OO))
+#define OBJECT_COUNTS_NUM_buffers(k)	\
+			(OBJECT_COUNTS_NUM_BI(k) + OBJECT_COUNTS_NUM_BO(k))
+
+#define OBJECT_COUNTS_NUM_objects(k)	\
+			(OBJECT_COUNTS_NUM_OI(k) + OBJECT_COUNTS_NUM_OO(k))
+
+/* Indices into args[] */
+
+#define OBJECT_COUNTS_INDEX_BI(k)   0
+#define OBJECT_COUNTS_INDEX_BO(k)		\
+			(OBJECT_COUNTS_INDEX_BI(k) + OBJECT_COUNTS_NUM_BI(k))
+#define OBJECT_COUNTS_INDEX_OI(k)		\
+			(OBJECT_COUNTS_INDEX_BO(k) + OBJECT_COUNTS_NUM_BO(k))
+#define OBJECT_COUNTS_INDEX_OO(k)		\
+			(OBJECT_COUNTS_INDEX_OI(k) + OBJECT_COUNTS_NUM_OI(k))
+#define OBJECT_COUNTS_TOTAL(k)		\
+			(OBJECT_COUNTS_INDEX_OO(k) + OBJECT_COUNTS_NUM_OO(k))
+
+#define OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
+	((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
+			((in_objs) << 8) | ((out_objs) << 12)))
+
+#define OBJECT_COUNTS_INDEX_buffers(k)   OBJECT_COUNTS_INDEX_BI(k)
+
+/* Object_invoke return codes */
+
+#define OBJECT_isOK(err)        ((err) == 0)
+#define OBJECT_isERROR(err)     ((err) != 0)
+
+/* Generic error codes */
+
+#define OBJECT_OK                  0   /* non-specific success code */
+#define OBJECT_ERROR               1   /* non-specific error */
+#define OBJECT_ERROR_INVALID       2   /* unsupported/unrecognized request */
+#define OBJECT_ERROR_SIZE_IN       3   /* supplied buffer/string too large */
+#define OBJECT_ERROR_SIZE_OUT      4   /* supplied output buffer too small */
+
+#define OBJECT_ERROR_USERBASE     10   /* start of user-defined error range */
+
+/* Transport layer error codes */
+
+#define OBJECT_ERROR_DEFUNCT     -90   /* object no longer exists */
+#define OBJECT_ERROR_ABORT       -91   /* calling thread must exit */
+#define OBJECT_ERROR_BADOBJ      -92   /* invalid object context */
+#define OBJECT_ERROR_NOSLOTS     -93   /* caller's object table full */
+#define OBJECT_ERROR_MAXARGS     -94   /* too many args */
+#define OBJECT_ERROR_MAXDATA     -95   /* buffers too large */
+#define OBJECT_ERROR_UNAVAIL     -96   /* the request could not be processed */
+#define OBJECT_ERROR_KMEM        -97   /* kernel out of memory */
+#define OBJECT_ERROR_REMOTE      -98   /* local method sent to remote object */
+#define OBJECT_ERROR_BUSY        -99   /* Object is busy */
+#define Object_ERROR_TIMEOUT     -103  /* Call Back Object invocation timed out. */
+
+#define FOR_ARGS(ndxvar, counts, section) \
+	for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
+		ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
+		+ OBJECT_COUNTS_NUM_##section(counts)); \
+		++ndxvar)
+
+/* ObjectOp */
+
+#define ObjectOp_METHOD_MASK     ((uint32_t) 0x0000FFFFu)
+#define ObjectOp_methodID(op)    ((op) & ObjectOp_METHOD_MASK)
+
+#define ObjectOp_LOCAL           ((uint32_t) 0x00008000U)
+
+#define ObjectOp_isLocal(op)     (((op) & ObjectOp_LOCAL) != 0)
+
+
+#define Object_OP_release       (ObjectOp_METHOD_MASK - 0)
+#define Object_OP_retain        (ObjectOp_METHOD_MASK - 1)
+
+/* Object */
+
+#define ObjectCounts_pack(nBuffersIn, nBuffersOut, nObjectsIn, nObjectsOut) \
+	((uint32_t) ((nBuffersIn) |	\
+	((nBuffersOut) << 4) |			\
+	((nObjectsIn) << 8)  |			\
+	((nObjectsOut) << 12)))
+
+union ObjectArg;
+struct smcinvoke_cmd_req;
+
+typedef int32_t (*ObjectInvoke)(void *h,
+				uint32_t op,
+				union ObjectArg *args,
+				uint32_t counts);
+
+struct Object {
+	ObjectInvoke invoke;
+	void *context;
+};
+
+struct ObjectBuf {
+	void *ptr;
+	size_t size;
+};
+
+struct ObjectBufIn {
+	const void *ptr;
+	size_t size;
+};
+
+union ObjectArg {
+	struct ObjectBuf b;
+	struct ObjectBufIn bi;
+	struct Object o;
+};
+
+static inline int32_t Object_invoke(struct Object o, uint32_t op,
+				union ObjectArg *args, uint32_t k)
+{
+	return o.invoke(o.context, op, args, k);
+}
+
+#define Object_NULL		((struct Object){NULL, NULL})
+
+
+#define OBJECT_NOT_RETAINED
+
+#define OBJECT_CONSUMED
+
+static inline int32_t Object_release(OBJECT_CONSUMED struct Object o)
+{
+	return Object_invoke((o), Object_OP_release, 0, 0);
+}
+static inline int32_t Object_retain(struct Object o)
+{
+	return Object_invoke((o), Object_OP_retain, 0, 0);
+}
+
+#define Object_isNull(o)	((o).invoke == NULL)
+
+#define Object_RELEASE_IF(o)				\
+	do {						\
+		struct Object o_ = (o);			\
+		if (!Object_isNull(o_))			\
+			(void) Object_release(o_);	\
+	} while (0)
+
+static inline void Object_replace(struct Object *loc, struct Object objNew)
+{
+	if (!Object_isNull(*loc))
+		Object_release(*loc);
+
+	if (!Object_isNull(objNew))
+		Object_retain(objNew);
+	*loc = objNew;
+}
+
+#define Object_ASSIGN_NULL(loc)  Object_replace(&(loc), Object_NULL)
+#define SMCINVOKE_INTERFACE_MAX_RETRY       5
+#define SMCINVOKE_INTERFACE_BUSY_WAIT_MS    5
+
+int smcinvoke_release_from_kernel_client(int fd);
+
+int get_root_fd(int *root_fd);
+int process_invoke_request_from_kernel_client(
+		int fd, struct smcinvoke_cmd_req *req);
+
+char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm);
+
+int32_t get_client_env_object(struct Object *clientEnvObj);
+
+#endif /* __SMCINVOKE_OBJECT_H */

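ObjectCounts_pack() and the args[] ordering above (buffers-in, then buffers-out, then objects-in, then objects-out) are the whole calling convention. A short sketch of a call that sends one buffer and receives one, with a placeholder op code and a hypothetical wrapper name:

/* Minimal sketch of the invoke calling convention (op code and payload are placeholders). */
static int32_t example_echo(struct Object remote, const void *in, size_t in_len,
			    void *out, size_t out_len, size_t *out_lenout)
{
	union ObjectArg a[2];
	int32_t result;

	a[0].bi = (struct ObjectBufIn) { in, in_len };	/* index 0: input buffers come first */
	a[1].b = (struct ObjectBuf) { out, out_len };	/* then output buffers */

	result = Object_invoke(remote, 0 /* placeholder op */, a,
			       ObjectCounts_pack(1, 1, 0, 0));

	if (OBJECT_isOK(result))
		*out_lenout = a[1].b.size;	/* callee writes back the size actually produced */

	return result;
}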
+ 48 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/IAppClient.h

@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+/** @cond */
+
+#pragma once
+
+
+#include "smcinvoke_object.h"
+
+
+#define IAppClient_ERROR_APP_NOT_FOUND INT32_C(10)
+#define IAppClient_ERROR_APP_RESTART_FAILED INT32_C(11)
+#define IAppClient_ERROR_APP_UNTRUSTED_CLIENT INT32_C(12)
+#define IAppClient_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(13)
+#define IAppClient_ERROR_APP_LOAD_FAILED INT32_C(14)
+
+#define IAppClient_OP_getAppObject 0
+
+static inline int32_t
+IAppClient_release(struct Object self)
+{
+  return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IAppClient_retain(struct Object self)
+{
+  return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IAppClient_getAppObject(struct Object self, const void *appDistName_ptr, size_t appDistName_len, struct Object *obj_ptr)
+{
+  int32_t result;
+  union ObjectArg a[2];
+  a[0].bi = (struct ObjectBufIn) { appDistName_ptr, appDistName_len * 1 };
+
+  result = Object_invoke(self, IAppClient_OP_getAppObject, a, ObjectCounts_pack(1, 0, 0, 1));
+
+  *obj_ptr = a[1].o;
+
+  return result;
+}
+
+
+

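IAppClient_getAppObject retrieves the object of an already-loaded TA by its distributed name. A hedged sketch — the name format expected by the TZ app client and the use of the hdcp2p2 name are assumptions; the app_client object itself would come from IClientEnv_open() with CAppClient_UID, which hdcp_smcinvoke.c above includes:

/* Hypothetical sketch: look up an already-loaded TA's object by its distributed name. */
static int32_t get_loaded_app(struct Object app_client, struct Object *app)
{
	const char name[] = "hdcp2p2";	/* illustrative app name */

	return IAppClient_getAppObject(app_client, name, sizeof(name) - 1, app);
}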
+ 143 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/IAppController.h

@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/** @cond */
+#pragma once
+
+#include "smcinvoke_object.h"
+
+
+#define IAppController_CBO_INTERFACE_WAIT UINT32_C(1)
+
+#define IAppController_ERROR_APP_SUSPENDED INT32_C(10)
+#define IAppController_ERROR_APP_BLOCKED_ON_LISTENER INT32_C(11)
+#define IAppController_ERROR_APP_UNLOADED INT32_C(12)
+#define IAppController_ERROR_APP_IN_USE INT32_C(13)
+#define IAppController_ERROR_NOT_SUPPORTED INT32_C(14)
+#define IAppController_ERROR_CBO_UNKNOWN INT32_C(15)
+#define IAppController_ERROR_APP_UNLOAD_NOT_ALLOWED INT32_C(16)
+#define IAppController_ERROR_APP_DISCONNECTED INT32_C(17)
+#define IAppController_ERROR_USER_DISCONNECT_REJECTED INT32_C(18)
+#define IAppController_ERROR_STILL_RUNNING INT32_C(19)
+
+#define IAppController_OP_openSession 0
+#define IAppController_OP_unload 1
+#define IAppController_OP_getAppObject 2
+#define IAppController_OP_installCBO 3
+#define IAppController_OP_disconnect 4
+#define IAppController_OP_restart 5
+
+static inline int32_t
+IAppController_release(struct Object self)
+{
+  return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IAppController_retain(struct Object self)
+{
+  return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IAppController_openSession(struct Object self, uint32_t cancelCode_val, uint32_t connectionMethod_val,
+  uint32_t connectionData_val, uint32_t paramTypes_val, uint32_t exParamTypes_val,
+  const void *i1_ptr, size_t i1_len, const void *i2_ptr, size_t i2_len,
+  const void *i3_ptr, size_t i3_len, const void *i4_ptr, size_t i4_len,
+  void *o1_ptr, size_t o1_len, size_t *o1_lenout, void *o2_ptr, size_t o2_len, size_t *o2_lenout,
+  void *o3_ptr, size_t o3_len, size_t *o3_lenout, void *o4_ptr, size_t o4_len, size_t *o4_lenout,
+  struct Object imem1_val, struct Object imem2_val, struct Object imem3_val, struct Object imem4_val,
+  uint32_t *memrefOutSz1_ptr, uint32_t *memrefOutSz2_ptr, uint32_t *memrefOutSz3_ptr,
+  uint32_t *memrefOutSz4_ptr, struct Object *session_ptr, uint32_t *retValue_ptr, uint32_t *retOrigin_ptr)
+{
+  union ObjectArg a[15];
+  struct {
+    uint32_t m_cancelCode;
+    uint32_t m_connectionMethod;
+    uint32_t m_connectionData;
+    uint32_t m_paramTypes;
+    uint32_t m_exParamTypes;
+  } i;
+
+  struct {
+    uint32_t m_memrefOutSz1;
+    uint32_t m_memrefOutSz2;
+    uint32_t m_memrefOutSz3;
+    uint32_t m_memrefOutSz4;
+    uint32_t m_retValue;
+    uint32_t m_retOrigin;
+  } o;
+  int32_t result;
+
+  a[0].b = (struct ObjectBuf) { &i, 20 };
+  a[5].b = (struct ObjectBuf) { &o, 24 };
+  i.m_cancelCode = cancelCode_val;
+  i.m_connectionMethod = connectionMethod_val;
+  i.m_connectionData = connectionData_val;
+  i.m_paramTypes = paramTypes_val;
+  i.m_exParamTypes = exParamTypes_val;
+  a[1].bi = (struct ObjectBufIn) { i1_ptr, i1_len * 1 };
+  a[2].bi = (struct ObjectBufIn) { i2_ptr, i2_len * 1 };
+  a[3].bi = (struct ObjectBufIn) { i3_ptr, i3_len * 1 };
+  a[4].bi = (struct ObjectBufIn) { i4_ptr, i4_len * 1 };
+  a[6].b = (struct ObjectBuf) { o1_ptr, o1_len * 1 };
+  a[7].b = (struct ObjectBuf) { o2_ptr, o2_len * 1 };
+  a[8].b = (struct ObjectBuf) { o3_ptr, o3_len * 1 };
+  a[9].b = (struct ObjectBuf) { o4_ptr, o4_len * 1 };
+  a[10].o = imem1_val;
+  a[11].o = imem2_val;
+  a[12].o = imem3_val;
+  a[13].o = imem4_val;
+
+  result = Object_invoke(self, IAppController_OP_openSession, a, ObjectCounts_pack(5, 5, 4, 1));
+
+  *o1_lenout = a[6].b.size / 1;
+  *o2_lenout = a[7].b.size / 1;
+  *o3_lenout = a[8].b.size / 1;
+  *o4_lenout = a[9].b.size / 1;
+  *memrefOutSz1_ptr = o.m_memrefOutSz1;
+  *memrefOutSz2_ptr = o.m_memrefOutSz2;
+  *memrefOutSz3_ptr = o.m_memrefOutSz3;
+  *memrefOutSz4_ptr = o.m_memrefOutSz4;
+  *session_ptr = a[14].o;
+  *retValue_ptr = o.m_retValue;
+  *retOrigin_ptr = o.m_retOrigin;
+
+  return result;
+}
+
+static inline int32_t
+IAppController_unload(struct Object self)
+{
+  return Object_invoke(self, IAppController_OP_unload, 0, 0);
+}
+
+static inline int32_t
+IAppController_getAppObject(struct Object self, struct Object *obj_ptr)
+{
+  union ObjectArg a[1];
+  int32_t result = Object_invoke(self, IAppController_OP_getAppObject, a, ObjectCounts_pack(0, 0, 0, 1));
+
+  *obj_ptr = a[0].o;
+
+  return result;
+}
+
+static inline int32_t
+IAppController_installCBO(struct Object self, uint32_t uid_val, struct Object obj_val)
+{
+  union ObjectArg a[2];
+  a[0].b = (struct ObjectBuf) { &uid_val, sizeof(uint32_t) };
+  a[1].o = obj_val;
+
+  return Object_invoke(self, IAppController_OP_installCBO, a, ObjectCounts_pack(1, 0, 1, 0));
+}
+
+static inline int32_t
+IAppController_disconnect(struct Object self)
+{
+  return Object_invoke(self, IAppController_OP_disconnect, 0, 0);
+}
+
+static inline int32_t
+IAppController_restart(struct Object self)
+{
+  return Object_invoke(self, IAppController_OP_restart, 0, 0);
+}
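
A hedged sketch of the controller lifetime implied by the operations above; the controller is assumed to come from IAppLoader (next header in this change), and error handling is trimmed:

static void demo_controller_teardown(struct Object controller)
{
	struct Object ta;

	if (!IAppController_getAppObject(controller, &ta)) {
		/* ... use the TA object ... */
		Object_invoke(ta, Object_OP_release, 0, 0);	/* drop the TA reference */
	}

	IAppController_unload(controller);	/* ask TZ to unload the app */
	IAppController_release(controller);	/* drop the controller reference */
}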
+
+
+

+ 105 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/IAppLoader.h

@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#pragma once
+
+
+#include "smcinvoke_object.h"
+#include "IAppController.h"
+
+#define IAppLoader_ERROR_INVALID_BUFFER INT32_C(10)
+#define IAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
+#define IAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
+#define IAppLoader_ERROR_METADATA_INVALID INT32_C(13)
+#define IAppLoader_ERROR_MAX_NUM_APPS INT32_C(14)
+#define IAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15)
+#define IAppLoader_ERROR_ALREADY_LOADED INT32_C(16)
+#define IAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
+#define IAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
+#define IAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
+#define IAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
+#define IAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
+#define IAppLoader_ERROR_APP_NOT_LOADED INT32_C(22)
+#define IAppLoader_ERROR_APP_MAX_CLIENT_CONNECTIONS INT32_C(23)
+#define IAppLoader_ERROR_APP_BLACKLISTED INT32_C(24)
+
+#define IAppLoader_OP_loadFromBuffer 0
+#define IAppLoader_OP_loadFromRegion 1
+#define IAppLoader_OP_loadEmbedded 2
+#define IAppLoader_OP_connect 3
+
+static inline int32_t
+IAppLoader_release(struct Object self)
+{
+  return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IAppLoader_retain(struct Object self)
+{
+  return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IAppLoader_loadFromBuffer(struct Object self, const void *appElf_ptr, size_t appElf_len, struct Object *appController_ptr)
+{
+  union ObjectArg a[2];
+  int32_t result;
+  a[0].bi = (struct ObjectBufIn) { appElf_ptr, appElf_len * 1 };
+
+
+  result = Object_invoke(self, IAppLoader_OP_loadFromBuffer, a, ObjectCounts_pack(1, 0, 0, 1));
+
+  *appController_ptr = a[1].o;
+
+  return result;
+}
+
+static inline int32_t
+IAppLoader_loadFromRegion(struct Object self, struct Object appElf_val, struct Object *appController_ptr)
+{
+  union ObjectArg a[2];
+  int32_t result;
+  a[0].o = appElf_val;
+
+  result = Object_invoke(self, IAppLoader_OP_loadFromRegion, a, ObjectCounts_pack(0, 0, 1, 1));
+
+  *appController_ptr = a[1].o;
+
+  return result;
+}
+
+static inline int32_t
+IAppLoader_loadEmbedded(struct Object self, const void *appName_ptr, size_t appName_len, struct Object *appController_ptr)
+{
+  union ObjectArg a[2];
+  int32_t result;
+  a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
+
+  result = Object_invoke(self, IAppLoader_OP_loadEmbedded, a, ObjectCounts_pack(1, 0, 0, 1));
+
+  *appController_ptr = a[1].o;
+
+  return result;
+}
+
+static inline int32_t
+IAppLoader_connect(struct Object self, const void *appName_ptr, size_t appName_len, struct Object *appController_ptr)
+{
+  union ObjectArg a[2];
+
+  int32_t result;
+  a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
+
+
+  result = Object_invoke(self, IAppLoader_OP_connect, a, ObjectCounts_pack(1, 0, 0, 1));
+
+  *appController_ptr = a[1].o;
+
+  return result;
+}
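
A hedged sketch of loading a TA through the wrappers above; `apploader` is assumed to be a valid IAppLoader object (obtained elsewhere, e.g. via IOpener), `elf`/`elf_len` hold a signed TA image, and the app name is a placeholder:

static int demo_load_ta(struct Object apploader, const void *elf, size_t elf_len,
			struct Object *controller)
{
	int32_t ret;

	ret = IAppLoader_loadFromBuffer(apploader, elf, elf_len, controller);
	if (ret == IAppLoader_ERROR_ALREADY_LOADED)
		/* Possible recovery path: attach to the already loaded app
		 * by name; "smplap32" (8 chars) is only a placeholder. */
		ret = IAppLoader_connect(apploader, "smplap32", 8, controller);

	return ret;
}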
+
+
+

+ 48 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/IOpener.h

@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/** @cond */
+#pragma once
+
+
+#include "smcinvoke_object.h"
+
+/** 0 is not a valid service ID. */
+#define IOpener_INVALID_ID UINT32_C(0)
+
+#define IOpener_ERROR_NOT_FOUND INT32_C(10)
+#define IOpener_ERROR_PRIVILEGE INT32_C(11)
+#define IOpener_ERROR_NOT_SUPPORTED INT32_C(12)
+
+#define IOpener_OP_open 0
+
+static inline int32_t
+IOpener_release(struct Object self)
+{
+  return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IOpener_retain(struct Object self)
+{
+  return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IOpener_open(struct Object self, uint32_t id_val, struct Object *obj_ptr)
+{
+  union ObjectArg a[2];
+  int32_t result;
+  a[0].b = (struct ObjectBuf) { &id_val, sizeof(uint32_t) };
+
+  result = Object_invoke(self, IOpener_OP_open, a, ObjectCounts_pack(1, 0, 0, 1));
+
+  *obj_ptr = a[1].o;
+
+  return result;
+}
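
A hedged sketch: IOpener_open resolves a TZ service by its class UID. The app-loader UID (3, defined as CAppLoader_UID later in this change) is used here as the example id; `opener` is assumed valid and the error report is only illustrative:

static int demo_open_apploader(struct Object opener, struct Object *apploader)
{
	int32_t ret = IOpener_open(opener, 3 /* CAppLoader_UID */, apploader);

	if (ret == IOpener_ERROR_NOT_FOUND)
		pr_err("app loader service not available\n");
	return ret;
}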
+
+
+

+ 41 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/smci_appclient.h

@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_APPCLIENT_H
+#define __SMCI_APPCLIENT_H
+
+#include "smci_object.h"
+#include "IAppClient.h"
+
+#define SMCI_APPCLIENT_ERROR_APP_NOT_FOUND INT32_C(10)
+#define SMCI_APPCLIENT_ERROR_APP_RESTART_FAILED INT32_C(11)
+#define SMCI_APPCLIENT_ERROR_APP_UNTRUSTED_CLIENT INT32_C(12)
+#define SMCI_APPCLIENT_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(13)
+#define SMCI_APPCLIENT_ERROR_APP_LOAD_FAILED INT32_C(14)
+
+#define SMCI_APPCLIENT_UID (0x97)
+#define SMCI_APPCLIENT_OP_GETAPPOBJECT 0
+
+static inline int32_t
+smci_appclient_release(struct smci_object self)
+{
+	return IAppClient_release(self);
+}
+
+static inline int32_t
+smci_appclient_retain(struct smci_object self)
+{
+	return IAppClient_retain(self);
+}
+
+static inline int32_t
+smci_appclient_getappobject(struct smci_object self, const void *app_dist_name_ptr,
+			size_t app_dist_name_len, struct smci_object *obj_ptr)
+{
+	return IAppClient_getAppObject(self, app_dist_name_ptr,
+		app_dist_name_len, obj_ptr);
+}
+
+#endif /* __SMCI_APPCLIENT_H */

+ 100 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/smci_appcontroller.h

@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_APPCONTROLLER_H
+#define __SMCI_APPCONTROLLER_H
+
+#include "smci_object.h"
+#include "IAppController.h"
+
+#define SMCI_APPCONTROLLER_CBO_INTERFACE_WAIT UINT32_C(1)
+
+#define SMCI_APPCONTROLLER_ERROR_APP_SUSPENDED INT32_C(10)
+#define SMCI_APPCONTROLLER_ERROR_APP_BLOCKED_ON_LISTENER INT32_C(11)
+#define SMCI_APPCONTROLLER_ERROR_APP_UNLOADED INT32_C(12)
+#define SMCI_APPCONTROLLER_ERROR_APP_IN_USE INT32_C(13)
+#define SMCI_APPCONTROLLER_ERROR_NOT_SUPPORTED INT32_C(14)
+#define SMCI_APPCONTROLLER_ERROR_CBO_UNKNOWN INT32_C(15)
+#define SMCI_APPCONTROLLER_ERROR_APP_UNLOAD_NOT_ALLOWED INT32_C(16)
+#define SMCI_APPCONTROLLER_ERROR_APP_DISCONNECTED INT32_C(17)
+#define SMCI_APPCONTROLLER_ERROR_USER_DISCONNECT_REJECTED INT32_C(18)
+#define SMCI_APPCONTROLLER_ERROR_STILL_RUNNING INT32_C(19)
+
+#define SMCI_APPCONTROLLER_OP_OPENSESSION 0
+#define SMCI_APPCONTROLLER_OP_UNLOAD 1
+#define SMCI_APPCONTROLLER_OP_GETAPPOBJECT 2
+#define SMCI_APPCONTROLLER_OP_INSTALLCBO 3
+#define SMCI_APPCONTROLLER_OP_DISCONNECT 4
+#define SMCI_APPCONTROLLER_OP_RESTART 5
+
+static inline int32_t
+smci_appcontroller_release(struct smci_object self)
+{
+	return IAppController_release(self);
+}
+
+static inline int32_t
+smci_appcontroller_retain(struct smci_object self)
+{
+	return IAppController_retain(self);
+}
+
+static inline int32_t
+smci_appcontroller_opensession(struct smci_object self, uint32_t cancel_code_val,
+	uint32_t connection_method_val, uint32_t connection_data_val, uint32_t param_types_val,
+	uint32_t ex_param_types_val, const void *i1_ptr, size_t i1_len, const void *i2_ptr,
+	size_t i2_len, const void *i3_ptr, size_t i3_len, const void *i4_ptr, size_t i4_len,
+	void *o1_ptr, size_t o1_len, size_t *o1_lenout, void *o2_ptr, size_t o2_len,
+	size_t *o2_lenout, void *o3_ptr, size_t o3_len, size_t *o3_lenout, void *o4_ptr,
+	size_t o4_len, size_t *o4_lenout, struct smci_object imem1_val,
+	struct smci_object imem2_val, struct smci_object imem3_val, struct smci_object imem4_val,
+	uint32_t *memref_out_sz1_ptr, uint32_t *memref_out_sz2_ptr, uint32_t *memref_out_sz3_ptr,
+	uint32_t *memref_out_sz4_ptr, struct smci_object *session_ptr, uint32_t *ret_value_ptr,
+	uint32_t *ret_origin_ptr)
+{
+	return IAppController_openSession(self, cancel_code_val,
+		connection_method_val, connection_data_val, param_types_val,
+		ex_param_types_val, i1_ptr, i1_len, i2_ptr,
+		i2_len, i3_ptr, i3_len, i4_ptr, i4_len,
+		o1_ptr, o1_len, o1_lenout, o2_ptr, o2_len,
+		o2_lenout, o3_ptr, o3_len, o3_lenout, o4_ptr,
+		o4_len, o4_lenout, imem1_val,
+		imem2_val, imem3_val, imem4_val,
+		memref_out_sz1_ptr, memref_out_sz2_ptr, memref_out_sz3_ptr,
+		memref_out_sz4_ptr, session_ptr, ret_value_ptr,
+		ret_origin_ptr);
+}
+
+static inline int32_t
+smci_appcontroller_unload(struct smci_object self)
+{
+	return IAppController_unload(self);
+}
+
+static inline int32_t
+smci_appcontroller_getappobject(struct smci_object self, struct smci_object *obj_ptr)
+{
+	return IAppController_getAppObject(self, obj_ptr);
+}
+
+static inline int32_t
+smci_appcontroller_installcbo(struct smci_object self, uint32_t uid_val, struct smci_object obj_val)
+{
+	return IAppController_installCBO(self, uid_val, obj_val);
+}
+
+static inline int32_t
+smci_appcontroller_disconnect(struct smci_object self)
+{
+	return IAppController_disconnect(self);
+}
+
+static inline int32_t
+smci_appcontroller_restart(struct smci_object self)
+{
+	return IAppController_restart(self);
+}
+
+#endif /* __SMCI_APPCONTROLLER_H */

+ 79 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/smci_apploader.h

@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_APPLOADER_H
+#define __SMCI_APPLOADER_H
+
+#include "smci_object.h"
+#include "smci_appcontroller.h"
+#include "IAppLoader.h"
+
+#define SMCI_APPLOADER_ERROR_INVALID_BUFFER INT32_C(10)
+#define SMCI_APPLOADER_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
+#define SMCI_APPLOADER_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
+#define SMCI_APPLOADER_ERROR_METADATA_INVALID INT32_C(13)
+#define SMCI_APPLOADER_ERROR_MAX_NUM_APPS INT32_C(14)
+#define SMCI_APPLOADER_ERROR_NO_NAME_IN_METADATA INT32_C(15)
+#define SMCI_APPLOADER_ERROR_ALREADY_LOADED INT32_C(16)
+#define SMCI_APPLOADER_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
+#define SMCI_APPLOADER_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
+#define SMCI_APPLOADER_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
+#define SMCI_APPLOADER_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
+#define SMCI_APPLOADER_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
+#define SMCI_APPLOADER_ERROR_APP_NOT_LOADED INT32_C(22)
+#define SMCI_APPLOADER_ERROR_APP_MAX_CLIENT_CONNECTIONS INT32_C(23)
+#define SMCI_APPLOADER_ERROR_APP_BLACKLISTED INT32_C(24)
+
+#define SMCI_APPLOADER_OP_LOADFROMBUFFER 0
+#define SMCI_APPLOADER_OP_LOADFROMREGION 1
+#define SMCI_APPLOADER_OP_LOADEMBEDDED 2
+#define SMCI_APPLOADER_OP_CONNECT 3
+#define SMCI_APPLOADER_UID (0x3)
+
+static inline int32_t
+smci_apploader_release(struct smci_object self)
+{
+	return IAppLoader_release(self);
+}
+
+static inline int32_t
+smci_apploader_retain(struct smci_object self)
+{
+	return IAppLoader_retain(self);
+}
+
+static inline int32_t
+smci_apploader_loadfrombuffer(struct smci_object self, const void *appelf_ptr, size_t appelf_len,
+		struct smci_object *appcontroller_ptr)
+{
+	return IAppLoader_loadFromBuffer(self, appelf_ptr, appelf_len,
+		appcontroller_ptr);
+}
+
+static inline int32_t
+smci_apploader_loadfromregion(struct smci_object self, struct smci_object appelf_val,
+		struct smci_object *appcontroller_ptr)
+{
+	return IAppLoader_loadFromRegion(self, appelf_val,
+		appcontroller_ptr);
+}
+
+static inline int32_t
+smci_apploader_loadembedded(struct smci_object self, const void *appname_ptr, size_t appname_len,
+		struct smci_object *appcontroller_ptr)
+{
+	return IAppLoader_loadEmbedded(self, appname_ptr, appname_len,
+		appcontroller_ptr);
+}
+
+static inline int32_t
+smci_apploader_connect(struct smci_object self, const void *appname_ptr, size_t appname_len,
+		struct smci_object *appcontroller_ptr)
+{
+	return IAppLoader_connect(self, appname_ptr, appname_len,
+		appcontroller_ptr);
+}
+
+#endif /* __SMCI_APPLOADER_H */
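
The same load flow expressed with the snake_case wrappers above; a hedged sketch with error handling trimmed and all objects assumed to be valid:

static int demo_smci_load(struct smci_object loader, const void *elf, size_t elf_len,
			  struct smci_object *ctrl, struct smci_object *ta)
{
	int rc = smci_apploader_loadfrombuffer(loader, elf, elf_len, ctrl);

	if (rc)
		return rc;
	/* Resolve the TA object behind the freshly created controller. */
	return smci_appcontroller_getappobject(*ctrl, ta);
}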

+ 40 - 0
qcom/opensource/securemsm-kernel/include/smci/interface/smci_opener.h

@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_OPENER_H
+#define __SMCI_OPENER_H
+
+#include "smci_object.h"
+#include "IOpener.h"
+
+
+/** 0 is not a valid service ID. */
+#define SMCI_OPENER_INVALID_ID UINT32_C(0)
+
+#define SMCI_OPENER_ERROR_NOT_FOUND INT32_C(10)
+#define SMCI_OPENER_ERROR_PRIVILEGE INT32_C(11)
+#define SMCI_OPENER_ERROR_NOT_SUPPORTED INT32_C(12)
+
+#define SMCI_OPENER_OP_OPEN 0
+
+static inline int32_t
+smci_opener_release(struct smci_object self)
+{
+	return IOpener_release(self);
+}
+
+static inline int32_t
+smci_opener_retain(struct smci_object self)
+{
+	return IOpener_retain(self);
+}
+
+static inline int32_t
+smci_opener_open(struct smci_object self, uint32_t id_val, struct smci_object *obj_ptr)
+{
+	return IOpener_open(self, id_val, obj_ptr);
+}
+
+#endif /* __SMCI_OPENER_H */

+ 20 - 0
qcom/opensource/securemsm-kernel/include/smci/uid/CAppClient.h

@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/**
+* @addtogroup CAppClient
+* @{
+  Class CAppClient implements the \link IAppClient \endlink interface.
+  This class provides an interface to obtain app-provided functionality.
+
+  The class ID `AppClient` is not included in the default privilege set.
+*/
+#pragma once
+
+#include <smcinvoke_object.h>
+
+#define CAppClient_UID (0x97)
+
+

+ 12 - 0
qcom/opensource/securemsm-kernel/include/smci/uid/CAppLoader.h

@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#pragma once
+
+#include <include/linux/smcinvoke_object.h>
+
+
+// This class provides an interface to load Secure Applications in QSEE
+#define CAppLoader_UID (3)

+ 390 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/qcedev.h

@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _UAPI_QCEDEV__H
+#define _UAPI_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
+#define QCEDEV_MAX_BEARER	31
+#define QCEDEV_MAX_KEY_SIZE	64
+#define QCEDEV_MAX_IV_SIZE	32
+
+#define QCEDEV_MAX_BUFFERS      16
+#define QCEDEV_MAX_SHA_DIGEST	32
+
+#define QCEDEV_USE_PMEM		1
+#define QCEDEV_NO_PMEM		0
+
+#define QCEDEV_AES_KEY_128	16
+#define QCEDEV_AES_KEY_192	24
+#define QCEDEV_AES_KEY_256	32
+/**
+ *qcedev_oper_enum: Operation types
+ * @QCEDEV_OPER_ENC:		Encrypt
+ * @QCEDEV_OPER_DEC:		Decrypt
+ * @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
+ *				user. Key already set by an external processor.
+ * @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
+ *				user. Key already set by an external processor.
+ */
+enum qcedev_oper_enum {
+	QCEDEV_OPER_DEC		= 0,
+	QCEDEV_OPER_ENC		= 1,
+	QCEDEV_OPER_DEC_NO_KEY	= 2,
+	QCEDEV_OPER_ENC_NO_KEY	= 3,
+	QCEDEV_OPER_LAST
+};
+
+/**
+ *qcedev_offload_oper_enum: Offload operation types (uses pipe keys)
+ * @QCEDEV_OFFLOAD_HLOS_HLOS:   Non-secure to non-secure (eg. audio dec).
+ * @QCEDEV_OFFLOAD_HLOS_CPB:    Non-secure to secure (eg. video dec).
+ * @QCEDEV_OFFLOAD_CPB_HLOS:    Secure to non-secure (eg. hdcp video enc).
+ */
+enum qcedev_offload_oper_enum {
+	QCEDEV_OFFLOAD_HLOS_HLOS = 1,
+	QCEDEV_OFFLOAD_HLOS_HLOS_1 = 2,
+	QCEDEV_OFFLOAD_HLOS_CPB = 3,
+	QCEDEV_OFFLOAD_HLOS_CPB_1 = 4,
+	QCEDEV_OFFLOAD_CPB_HLOS = 5,
+	QCEDEV_OFFLOAD_OPER_LAST
+};
+
+/**
+ *qcedev_offload_err_enum: Offload error conditions
+ * @QCEDEV_OFFLOAD_NO_ERROR:        Successful crypto operation.
+ * @QCEDEV_OFFLOAD_GENERIC_ERROR:   Generic error in crypto status.
+ * @QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR: Pipe key timer expired.
+ * @QCEDEV_OFFLOAD_KEY_PAUSE_ERROR:     Pipe key pause (means GPCE is paused).
+ */
+enum qcedev_offload_err_enum {
+	QCEDEV_OFFLOAD_NO_ERROR = 0,
+	QCEDEV_OFFLOAD_GENERIC_ERROR = 1,
+	QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR = 2,
+	QCEDEV_OFFLOAD_KEY_PAUSE_ERROR = 3
+};
+
+/**
+ *qcedev_cipher_alg_enum: Cipher algorithm types
+ * @QCEDEV_ALG_DES:		DES
+ * @QCEDEV_ALG_3DES:		3DES
+ * @QCEDEV_ALG_AES:		AES
+ */
+enum qcedev_cipher_alg_enum {
+	QCEDEV_ALG_DES		= 0,
+	QCEDEV_ALG_3DES		= 1,
+	QCEDEV_ALG_AES		= 2,
+	QCEDEV_ALG_LAST
+};
+
+/**
+ *qcedev_cipher_mode_enum : AES mode
+ * @QCEDEV_AES_MODE_CBC:		CBC
+ * @QCEDEV_AES_MODE_ECB:		ECB
+ * @QCEDEV_AES_MODE_CTR:		CTR
+ * @QCEDEV_AES_MODE_XTS:		XTS
+ * @QCEDEV_AES_MODE_CCM:		CCM
+ * @QCEDEV_DES_MODE_CBC:		CBC
+ * @QCEDEV_DES_MODE_ECB:		ECB
+ */
+enum qcedev_cipher_mode_enum {
+	QCEDEV_AES_MODE_CBC	= 0,
+	QCEDEV_AES_MODE_ECB	= 1,
+	QCEDEV_AES_MODE_CTR	= 2,
+	QCEDEV_AES_MODE_XTS	= 3,
+	QCEDEV_AES_MODE_CCM	= 4,
+	QCEDEV_DES_MODE_CBC	= 5,
+	QCEDEV_DES_MODE_ECB	= 6,
+	QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+ *enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+ * @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+ * @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
+ * @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
+ * @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
+ * @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+ */
+enum qcedev_sha_alg_enum {
+	QCEDEV_ALG_SHA1		= 0,
+	QCEDEV_ALG_SHA256	= 1,
+	QCEDEV_ALG_SHA1_HMAC	= 2,
+	QCEDEV_ALG_SHA256_HMAC	= 3,
+	QCEDEV_ALG_AES_CMAC	= 4,
+	QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+ * struct buf_info - Buffer information
+ * @offset:			Offset from the base address of the buffer
+ *				(Used when buffer is allocated using PMEM)
+ * @vaddr:			Virtual buffer address pointer
+ * @len:				Size of the buffer
+ */
+struct	buf_info {
+	union {
+		__u32	offset;
+		__u8		*vaddr;
+	};
+	__u32	len;
+};
+
+/**
+ * struct qcedev_vbuf_info - Source and destination Buffer information
+ * @src:				Array of buf_info for input/source
+ * @dst:				Array of buf_info for output/destination
+ */
+struct	qcedev_vbuf_info {
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct qcedev_pmem_info - Stores PMEM buffer information
+ * @fd_src:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for input/src buffer
+ * @src:				Array of buf_info for input/source
+ * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for output/dst buffer
+ * @dst:				Array of buf_info for output/destination
+ * @pmem_src_offset:		The offset from input/src buffer
+ *				(allocated by PMEM)
+ */
+struct	qcedev_pmem_info {
+	int		fd_src;
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	int		fd_dst;
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct qcedev_cipher_op_req - Holds the ciphering request information
+ * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+ *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+ * @pmem (IN):		Stores PMEM buffer information.
+ *			Refer struct qcedev_pmem_info
+ * @vbuf (IN/OUT):	Stores Source and destination Buffer information
+ *			Refer to struct qcedev_vbuf_info
+ * @data_len (IN):	Total Length of input/src and output/dst in bytes
+ * @in_place_op (IN):	Indicates whether the operation is in place, i.e.
+ *			source == destination.
+ *			When using PMEM-allocated memory, this must be set to 1.
+ * @enckey (IN):		128 bits of confidentiality key
+ *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+ *			enckey[15] bit 7-0
+ * @encklen (IN):	Length of the encryption key (set to 128 bits/16
+ *			bytes in the driver)
+ * @iv (IN/OUT):		Initialisation vector data
+ *			This is updated by the driver, incremented by
+ *			number of blocks encrypted/decrypted.
+ * @ivlen (IN):		Length of the IV
+ * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+ *			for AES-128 CTR mode only)
+ * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+ * @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR.
+ *			Applicable when using the AES algorithm only.
+ * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+ *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+ *
+ * If use_pmem is set to 0, the driver assumes that memory was not allocated
+ * via PMEM; the kernel will allocate memory, copy data from the user
+ * space buffers (data_src/data_dst), process it, and copy the result back
+ * to the user space buffer.
+ *
+ * If use_pmem is set to 1, the driver assumes that memory was allocated via
+ * PMEM.
+ * The kernel driver will use the fd_src to determine the kernel virtual address
+ * base that maps to the user space virtual address base for the buffer
+ * allocated in user space.
+ * The final input/src and output/dst buffer pointer will be determined
+ * by adding the offsets to the kernel virtual addr.
+ *
+ * If use of hardware key is supported in the target, user can configure the
+ * key parameters (encklen, enckey) to use the hardware key.
+ * In order to use the hardware key, set encklen to 0 and set the enckey
+ * data array to 0.
+ */
+struct	qcedev_cipher_op_req {
+	__u8				use_pmem;
+	union {
+		struct qcedev_pmem_info	pmem;
+		struct qcedev_vbuf_info	vbuf;
+	};
+	__u32			entries;
+	__u32			data_len;
+	__u8				in_place_op;
+	__u8				enckey[QCEDEV_MAX_KEY_SIZE];
+	__u32			encklen;
+	__u8				iv[QCEDEV_MAX_IV_SIZE];
+	__u32			ivlen;
+	__u32			byteoffset;
+	enum qcedev_cipher_alg_enum	alg;
+	enum qcedev_cipher_mode_enum	mode;
+	enum qcedev_oper_enum		op;
+};
+
+/**
+ * struct qcedev_sha_op_req - Holds the hashing request information
+ * @data (IN):			Array of pointers to the data to be hashed
+ * @entries (IN):		Number of buf_info entries in the data array
+ * @data_len (IN):		Length of data to be hashed
+ * @digest (IN/OUT):		Returns the hashed data information
+ * @diglen (OUT):		Size of the hashed/digest data
+ * @authkey (IN):		Pointer to authentication key for HMAC
+ * @authklen (IN):		Size of the authentication key
+ * @alg (IN):			Secure Hash algorithm
+ */
+struct	qcedev_sha_op_req {
+	struct buf_info			data[QCEDEV_MAX_BUFFERS];
+	__u32			entries;
+	__u32			data_len;
+	__u8				digest[QCEDEV_MAX_SHA_DIGEST];
+	__u32			diglen;
+	__u8				*authkey;
+	__u32			authklen;
+	enum qcedev_sha_alg_enum	alg;
+};
+
+/**
+ * struct pattern_info - Holds pattern information for pattern-based
+ * decryption/encryption for AES ECB, counter, and CBC modes.
+ * @patt_sz (IN):       Total number of blocks.
+ * @proc_data_sz (IN):  Number of blocks to be processed.
+ * @patt_offset (IN):   Start of the segment.
+ */
+struct pattern_info {
+	__u8 patt_sz;
+	__u8 proc_data_sz;
+	__u8 patt_offset;
+};
+
+/**
+ * struct qcedev_offload_cipher_op_req - Holds the offload request information
+ * @vbuf (IN/OUT):      Stores Source and destination Buffer information.
+ *                      Refer to struct qcedev_vbuf_info.
+ * @entries (IN):       Number of entries to be processed as part of request.
+ * @data_len (IN):      Total Length of input/src and output/dst in bytes
+ * @in_place_op (IN):   Indicates whether the operation is inplace where
+ *                      source == destination.
+ * @encklen (IN):       Length of the encryption key (set to 128 bits/16
+ *                      bytes in the driver).
+ * @iv (IN/OUT):        Initialisation vector data
+ *                      This is updated by the driver, incremented by
+ *                      number of blocks encrypted/decrypted.
+ * @ivlen (IN):         Length of the IV.
+ * @iv_ctr_size (IN):   IV counter increment mask size.
+ *                      Driver sets the mask value based on this size.
+ * @byteoffset (IN):    Offset in the Cipher BLOCK (applicable and to be set
+ *                      for AES-128 CTR mode only).
+ * @block_offset (IN):  Offset in the block that needs a skip of encrypt/
+ *                      decrypt.
+ * @is_pattern_valid (IN): Indicates the request contains a valid pattern.
+ * @pattern_info (IN):  The pattern to be used for the offload request.
+ * @is_copy_op (IN):    Set when the request only needs a copy between
+ *                      secure and non-secure buffers, without any encrypt/
+ *                      decrypt operation.
+ * @alg (IN):           Type of ciphering algorithm: AES/DES/3DES.
+ * @mode (IN):          Mode use when using AES algorithm: ECB/CBC/CTR.
+ *                      Applicable when using AES algorithm only.
+ * @op (IN):            Type of operation.
+ *                      Refer to qcedev_offload_oper_enum.
+ * @err (OUT):          Error in crypto status.
+ *                      Refer to qcedev_offload_err_enum.
+ */
+struct qcedev_offload_cipher_op_req {
+	struct qcedev_vbuf_info vbuf;
+	__u32 entries;
+	__u32 data_len;
+	__u32 in_place_op;
+	__u32 encklen;
+	__u8 iv[QCEDEV_MAX_IV_SIZE];
+	__u32 ivlen;
+	__u32 iv_ctr_size;
+	__u32 byteoffset;
+	__u8 block_offset;
+	__u8 is_pattern_valid;
+	__u8 is_copy_op;
+	__u8 encrypt;
+	struct pattern_info pattern_info;
+	enum qcedev_cipher_alg_enum alg;
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_offload_oper_enum op;
+	enum qcedev_offload_err_enum err;
+};
+
+/**
+ * struct qfips_verify_t - Holds data for FIPS Integrity test
+ * @kernel_size  (IN):		Size of kernel Image
+ * @kernel       (IN):		pointer to buffer containing the kernel Image
+ */
+struct qfips_verify_t {
+	unsigned int kernel_size;
+	void *kernel;
+};
+
+/**
+ * struct qcedev_map_buf_req - Holds the mapping request information
+ * fd (IN):            Array of fds.
+ * num_fds (IN):       Number of fds in fd[].
+ * fd_size (IN):       Array of sizes corresponding to each fd in fd[].
+ * fd_offset (IN):     Array of offset corresponding to each fd in fd[].
+ * buf_vaddr (OUT):    Array of mapped virtual addresses corresponding to
+ *			each fd in fd[].
+ */
+struct qcedev_map_buf_req {
+	__s32         fd[QCEDEV_MAX_BUFFERS];
+	__u32        num_fds;
+	__u32        fd_size[QCEDEV_MAX_BUFFERS];
+	__u32        fd_offset[QCEDEV_MAX_BUFFERS];
+	__u64        buf_vaddr[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct qcedev_unmap_buf_req - Holds the unmap request information
+ * fd (IN):            Array of fds to unmap
+ * num_fds (IN):       Number of fds in fd[].
+ */
+struct  qcedev_unmap_buf_req {
+	__s32         fd[QCEDEV_MAX_BUFFERS];
+	__u32        num_fds;
+};
+
+struct file;
+
+long qcedev_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+#define QCEDEV_IOC_MAGIC	0x87
+
+#define QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_MAP_BUF_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
+#define QCEDEV_IOCTL_UNMAP_BUF_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
+#define QCEDEV_IOCTL_OFFLOAD_OP_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 12, struct qcedev_offload_cipher_op_req)
+#endif /* _UAPI_QCEDEV__H */
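
A hedged user-space sketch of the non-PMEM (use_pmem == 0) path documented above: one-shot, in-place AES-128-CBC encryption of a caller buffer. The /dev/qce node name is an assumption and error handling is trimmed:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>

static int qcedev_aes128_cbc_encrypt(uint8_t *buf, uint32_t len,
				     const uint8_t key[QCEDEV_AES_KEY_128],
				     const uint8_t iv[16])
{
	struct qcedev_cipher_op_req req;
	int fd, rc;

	fd = open("/dev/qce", O_RDWR);		/* assumed device node */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.use_pmem = QCEDEV_NO_PMEM;		/* virtual-buffer path */
	req.entries = 1;
	req.vbuf.src[0].vaddr = buf;
	req.vbuf.src[0].len = len;
	req.vbuf.dst[0].vaddr = buf;		/* in-place operation */
	req.vbuf.dst[0].len = len;
	req.in_place_op = 1;
	req.data_len = len;
	memcpy(req.enckey, key, QCEDEV_AES_KEY_128);
	req.encklen = QCEDEV_AES_KEY_128;
	memcpy(req.iv, iv, 16);
	req.ivlen = 16;
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OPER_ENC;

	rc = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
	close(fd);
	return rc;
}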

+ 218 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/qcota.h

@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _UAPI_QCOTA_H
+#define _UAPI_QCOTA_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCE_OTA_MAX_BEARER   31
+#define OTA_KEY_SIZE 16   /* 128 bits of keys. */
+
+enum qce_ota_dir_enum {
+	QCE_OTA_DIR_UPLINK   = 0,
+	QCE_OTA_DIR_DOWNLINK = 1,
+	QCE_OTA_DIR_LAST
+};
+
+enum qce_ota_algo_enum {
+	QCE_OTA_ALGO_KASUMI = 0,
+	QCE_OTA_ALGO_SNOW3G = 1,
+	QCE_OTA_ALGO_LAST
+};
+
+/**
+ * struct qce_f8_req - qce f8 request
+ * @data_in:	packet input data stream to be ciphered.
+ *		If NULL, key-stream mode operation.
+ * @data_out:	ciphered packets output data.
+ * @data_len:	length of data_in and data_out in bytes.
+ * @count_c:	count-C, ciphering sequence number, 32 bit
+ * @bearer:	5 bit of radio bearer identifier.
+ * @ckey:	128 bits of confidentiality key,
+ *		ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
+ * @direction:	uplink or downlink.
+ * @algorithm:	Kasumi, or Snow3G.
+ *
+ * If data_in is NULL, the engine will run in a special mode called
+ * key stream mode. In this special mode, the engine will generate
+ * key stream output for the number of bytes specified in the
+ * data_len, based on the input parameters of direction, algorithm,
+ * ckey, bearer, and count_c. The data_len is restricted to
+ * a multiple of 16 bytes. The application can then take the
+ * output stream, exclusive-or it with the input data stream, and
+ * generate the final cipher data stream.
+ */
+struct qce_f8_req {
+	__u8  *data_in;
+	__u8  *data_out;
+	__u16  data_len;
+	__u32  count_c;
+	__u8   bearer;
+	__u8   ckey[OTA_KEY_SIZE];
+	enum qce_ota_dir_enum  direction;
+	enum qce_ota_algo_enum algorithm;
+	int current_req_info;
+};
+
+/**
+ * struct qce_f8_multi_pkt_req - qce f8 multiple packet request
+ *			Multiple packets of uniform size, sharing the same
+ *			F8 ciphering parameters, can be ciphered in a
+ *			single request.
+ *
+ * @num_pkt:		number of packets.
+ *
+ * @cipher_start:	ciphering starts offset within a packet.
+ *
+ * @cipher_size:	number of bytes to be ciphered within a packet.
+ *
+ * @qce_f8_req:		description of the packet and F8 parameters.
+ *			The following fields have special meaning for
+ *			multiple packet operation,
+ *
+ *	@data_len:	data_len indicates the length of a packet.
+ *
+ *	@data_in:	packets are concatenated together in a byte
+ *			stream started at data_in.
+ *
+ *	@data_out:	The returned ciphered output for multiple
+ *			packets.
+ *			Each packet ciphered output are concatenated
+ *			together into a byte stream started at data_out.
+ *			Note: in each ciphered packet, the output from
+ *			offset 0 to cipher_start - 1, and from offset
+ *			cipher_start + cipher_size to data_len - 1, remains
+ *			unaltered from the packet input area.
+ *	@count_c:	count-C of the first packet, 32 bit.
+ *
+ *
+ *   In one request, multiple packets can be ciphered, and output to the
+ *   data_out stream.
+ *
+ *   Packet data are laid out contiguously in sequence in data_in,
+ *   and data_out area. Every packet is identical size.
+ *   If the PDU is not byte aligned, set the data_len value of
+ *   to the rounded up value of the packet size. Eg, PDU size of
+ *   253 bits, set the packet size to 32 bytes. Next packet starts on
+ *   the next byte boundary.
+ *
+ *   For each packet, data from offset 0 to cipher_start
+ *   will be left unchanged and output to the data_out area.
+ *   This area of the packet can be for the RLC header, which is not
+ *   to be ciphered.
+ *
+ *   The ciphering of a packet starts from offset cipher_start, for
+ *   cipher_size bytes of data. Data starting from
+ *   offset cipher_start + cipher_size to the end of the packet will be left
+ *   unchanged and output to the data_out area.
+ *
+ *   For each packet the input arguments of bearer, direction,
+ *   ckey, algorithm have to be the same. count_c is the ciphering sequence
+ *   number of the first packet. The 2nd packet's ciphering sequence
+ *   number is assumed to be count_c + 1. The 3rd packet's ciphering sequence
+ *   number is count_c + 2, and so on.
+ *
+ */
+struct qce_f8_multi_pkt_req {
+	__u16    num_pkt;
+	__u16    cipher_start;
+	__u16    cipher_size;
+	struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request
+ *                      Multiple packets of variable size, sharing the same
+ *                      F8 ciphering parameters, can be ciphered in a
+ *                      single request.
+ *
+ * @num_pkt:            number of packets.
+ *
+ * @cipher_iov[]:       array of iov of packets to be ciphered.
+ *
+ *
+ * @qce_f8_req:         description of the packet and F8 parameters.
+ *                      The following fields have special meaning for
+ *                      multiple packet operation,
+ *
+ *      @data_len:      ignored.
+ *
+ *      @data_in:       ignored.
+ *
+ *      @data_out:      ignored.
+ *
+ *      @count_c:       count-C of the first packet, 32 bit.
+ *
+ *
+ *   In one request, multiple packets can be ciphered.
+ *
+ *   The i-th packet are defined in cipher_iov[i-1].
+ *   The ciphering of i-th packet starts from offset 0 of the PDU specified
+ *   by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data.
+ *   If the PDU is not byte aligned, set the cipher_iov[i-1].size value
+ *   to the rounded-up value of the packet size. E.g., for a PDU size of
+ *   253 bits, set the packet size to 32 bytes.
+ *
+ *   Ciphering is done in place. That is, the ciphering
+ *   input and output data are both in cipher_iov[i-1].addr for the i-th
+ *   packet.
+ *
+ *   For each packet the input arguments of bearer, direction,
+ *   ckey, algorithm have to be the same. count_c is the ciphering sequence
+ *   number of the first packet. The 2nd packet's ciphering sequence
+ *   number is assumed to be count_c + 1. The 3rd packet's ciphering sequence
+ *   number is count_c + 2, and so on.
+ */
+
+#define MAX_NUM_V_MULTI_PKT 20
+struct cipher_iov {
+	unsigned char  *addr;
+	unsigned short  size;
+};
+
+struct qce_f8_variable_multi_pkt_req {
+	unsigned short    num_pkt;
+	struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
+	struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f9_req - qce f9 request
+ * @message:	message
+ * @msize:	message size in bytes (include the last partial byte).
+ * @last_bits:	valid bits in the last byte of message.
+ * @mac_i:	32 bit message authentication code, to be returned.
+ * @fresh:	random 32 bit number, one per user.
+ * @count_i:	32 bit count-I integrity sequence number.
+ * @direction:	uplink or downlink.
+ * @ikey:	128 bits of integrity key,
+ *		ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
+ * @algorithm:  Kasumi, or Snow3G.
+ */
+struct qce_f9_req {
+	__u8   *message;
+	__u16   msize;
+	__u8    last_bits;
+	__u32   mac_i;
+	__u32   fresh;
+	__u32   count_i;
+	enum qce_ota_dir_enum direction;
+	__u8    ikey[OTA_KEY_SIZE];
+	enum qce_ota_algo_enum algorithm;
+	int current_req_info;
+};
+
+#define QCOTA_IOC_MAGIC     0x85
+
+#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
+#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
+#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
+#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
+				struct qce_f8_variable_multi_pkt_req)
+
+#endif /* _UAPI_QCOTA_H */
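
A hedged user-space sketch of the key-stream mode described above for qce_f8_req (data_in == NULL): the engine emits data_len bytes of key stream which the caller can XOR with the payload itself. The /dev/qcota0 node name is an assumption:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcota.h>

static int ota_f8_keystream(uint8_t *ks, uint16_t ks_len,	/* multiple of 16 */
			    const uint8_t ck[OTA_KEY_SIZE],
			    uint32_t count_c, uint8_t bearer)
{
	struct qce_f8_req req;
	int fd, rc;

	fd = open("/dev/qcota0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.data_in = NULL;			/* NULL selects key-stream mode */
	req.data_out = ks;
	req.data_len = ks_len;
	req.count_c = count_c;
	req.bearer = bearer;
	memcpy(req.ckey, ck, OTA_KEY_SIZE);
	req.direction = QCE_OTA_DIR_DOWNLINK;
	req.algorithm = QCE_OTA_ALGO_SNOW3G;

	rc = ioctl(fd, QCOTA_F8_REQ, &req);
	close(fd);
	return rc;
}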

+ 17 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/qrng.h

@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _UAPI_QRNG_H_
+#define _UAPI_QRNG_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QRNG_IOC_MAGIC    0x100
+
+#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\
+	_IO(QRNG_IOC_MAGIC, 1)
+
+#endif /* _UAPI_QRNG_H_ */

+ 186 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/qseecom.h

@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017, 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QSEECOM_H_
+#define _QSEECOM_H_
+
+#pragma message("Warning: This header file will be deprecated in future")
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_ION_FD  4
+#define MAX_APP_NAME_SIZE  64
+#define QSEECOM_HASH_SIZE  32
+
+#define ICE_KEY_SIZE 32
+#define ICE_SALT_SIZE 32
+
+/*
+ * struct qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_ion_fd_info {
+	__s32 fd;
+	__u32 cmd_buf_offset;
+};
+
+enum qseecom_key_management_usage_type {
+	QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
+	QSEOS_KM_USAGE_FILE_ENCRYPTION = 0x02,
+	QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION = 0x03,
+	QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION = 0x04,
+	QSEOS_KM_USAGE_MAX
+};
+
+struct qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	int wipe_key_flag;/* 1 -> remove key from storage (along with clearing the key) */
+			  /* 0 -> do not remove from storage (clear key only) */
+};
+
+struct qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+#define SHA256_DIGEST_LENGTH	(256/8)
+/*
+ * struct qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @hash[SHA256_DIGEST_LENGTH] -  sha256 digest.
+ */
+struct qseecom_save_partition_hash_req {
+	int partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct qseecom_is_es_activated_req
+ * @is_activated - 1=true , 0=false
+ */
+struct qseecom_is_es_activated_req {
+	int is_activated; /* out */
+};
+
+/*
+ * struct qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct qseecom_mdtp_cipher_dip_req {
+	__u8 *in_buf;
+	__u32 in_buf_size;
+	__u8 *out_buf;
+	__u32 out_buf_size;
+	__u32 direction;
+};
+
+struct qseecom_qteec_req {
+	void    *req_ptr;
+	__u32    req_len;
+	void    *resp_ptr;
+	__u32    resp_len;
+};
+
+struct qseecom_qteec_modfd_req {
+	void    *req_ptr;
+	__u32    req_len;
+	void    *resp_ptr;
+	__u32    resp_len;
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+#define MAX_CE_PIPE_PAIR_PER_UNIT 3
+
+struct qseecom_ce_pipe_entry {
+	int valid;
+	unsigned int ce_num;
+	unsigned int ce_pipe_pair;
+};
+
+struct qseecom_ice_data_t {
+	int flag;
+};
+
+#define MAX_CE_INFO_HANDLE_SIZE 32
+struct qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int usage;
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct qseecom_ice_key_data_t {
+	__u8 key[ICE_KEY_SIZE];
+	__u32 key_len;
+	__u8 salt[ICE_SALT_SIZE];
+	__u32 salt_len;
+};
+
+struct file;
+
+
+#define QSEECOM_IOC_MAGIC    0x97
+
+
+#define QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
+
+#define QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
+
+#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
+
+#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
+
+#define QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, struct qseecom_update_key_userinfo_req)
+
+#define QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct qseecom_qteec_req)
+
+#define QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct qseecom_qteec_modfd_req)
+
+#define QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req)
+
+#define QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, struct qseecom_ce_info_req)
+
+#define QSEECOM_IOCTL_SET_ICE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
+
+#define QSEECOM_IOCTL_FBE_CLEAR_KEY \
+	_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_ice_key_data_t)
+
+#endif /* _QSEECOM_H_ */
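
A hedged user-space sketch of the wipe_key_flag semantics documented above: clear the UFS ICE disk-encryption key without removing it from storage. `qseecom_fd` is assumed to be an open fd on the qseecom device node:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/qseecom.h>

static int demo_clear_ice_key(int qseecom_fd)
{
	struct qseecom_wipe_key_req req;

	memset(&req, 0, sizeof(req));
	req.usage = QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION;
	req.wipe_key_flag = 0;		/* 0: clear the key only, keep it in storage */

	return ioctl(qseecom_fd, QSEECOM_IOCTL_WIPE_KEY_REQ, &req);
}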

+ 196 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/qseecom_api.h

@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017, 2019, 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QSEECOM_API_H_
+#define _QSEECOM_API_H_
+
+#pragma message("Warning: This header file will be deprecated in future")
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include "qseecom.h"
+
+/*
+ * struct qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct qseecom_register_listener_req {
+	__u32 listener_id; /* in */
+	__s32 ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	__u32 sb_size; /* in */
+};
+
+/*
+ * struct qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct qseecom_send_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+/*
+ * struct qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_send_modfd_cmd_req {
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct qseecom_load_img_req - for sending image length information and
+ * ion file descriptor to the qseecom driver. ion file descriptor is used
+ * for retrieving the ion file handle and in turn the physical address of
+ * the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 +..+.bxx images files in bytes
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture of the image, i.e. 32bit or 64bit app
+ */
+struct qseecom_load_img_req {
+	__u32 mdt_len; /* in */
+	__u32 img_len; /* in */
+	__s32  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	__u32 app_arch; /* in */
+	__u32 app_id; /* out*/
+};
+
+struct qseecom_set_sb_mem_param_req {
+	__s32 ifd_data_fd; /* in */
+	void *virt_sb_base; /* in */
+	__u32 sb_len; /* in */
+};
+
+/*
+ * struct qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct qseecom_qseos_version_req {
+	unsigned int qseos_version; /* in */
+};
+
+/*
+ * struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name[MAX_APP_NAME_SIZE]-  name of the app.
+ * @app_id - app id.
+ */
+struct qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	__u32 app_id; /* out */
+	__u32 app_arch;
+};
+
+struct qseecom_send_svc_cmd_req {
+	__u32 cmd_id;
+	void *cmd_req_buf; /* in */
+	unsigned int cmd_req_len; /* in */
+	void *resp_buf; /* in/out */
+	unsigned int resp_len; /* in/out */
+};
+
+/*
+ * struct qseecom_send_modfd_listener_resp - for listener response ioctl request
+ * @resp_len - response buffer length
+ * @resp_buf_ptr - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct qseecom_send_modfd_listener_resp {
+	void *resp_buf_ptr; /* in */
+	unsigned int resp_len; /* in */
+	struct qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct qseecom_sg_entry {
+	__u32 phys_addr;
+	__u32 len;
+};
+
+struct qseecom_sg_entry_64bit {
+	__u64 phys_addr;
+	__u32 len;
+} __attribute__ ((packed));
+
+
+struct file;
+
+
+#define QSEECOM_IOC_MAGIC    0x97
+
+
+#define QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct qseecom_register_listener_req)
+
+#define QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct qseecom_send_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct qseecom_set_sb_mem_param_req)
+
+#define QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct qseecom_qseos_version_req)
+
+#define QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct qseecom_load_img_req)
+
+#define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
+
+#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, struct qseecom_send_modfd_listener_resp)
+
+#define QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct qseecom_send_modfd_cmd_req)
+
+#define QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, struct qseecom_send_modfd_listener_resp)
+
+#endif /* _QSEECOM_API_H_ */
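
A hedged user-space sketch of the legacy send-command path declared above. The command/response layout is TA-specific; the buffers here are placeholders and `qseecom_fd` is assumed to be already open:

#include <sys/ioctl.h>
#include <linux/qseecom_api.h>

static int demo_send_cmd(int qseecom_fd, void *cmd, unsigned int cmd_len,
			 void *rsp, unsigned int rsp_len)
{
	struct qseecom_send_cmd_req req = {
		.cmd_req_buf = cmd,
		.cmd_req_len = cmd_len,
		.resp_buf    = rsp,
		.resp_len    = rsp_len,
	};

	return ioctl(qseecom_fd, QSEECOM_IOCTL_SEND_CMD_REQ, &req);
}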

+ 110 - 0
qcom/opensource/securemsm-kernel/include/uapi/linux/smcinvoke.h

@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _UAPI_SMCINVOKE_H_
+#define _UAPI_SMCINVOKE_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCINVOKE_USERSPACE_OBJ_NULL	-1
+#define DEFAULT_CB_OBJ_THREAD_CNT	4
+#define SMCINVOKE_TZ_MIN_BUF_SIZE	4096
+
+struct smcinvoke_buf {
+	__u64 addr;
+	__u64 size;
+};
+
+struct smcinvoke_obj {
+	__s64 fd;
+	__s64 cb_server_fd;
+	__s64 reserved;
+};
+
+union smcinvoke_arg {
+	struct smcinvoke_buf b;
+	struct smcinvoke_obj o;
+};
+
+/*
+ * struct smcinvoke_cmd_req: This structure is transparently sent to TEE
+ * @op - Operation to be performed
+ * @counts - number of arguments passed
+ * @result - result of invoke operation
+ * @argsize - size of each of arguments
+ * @args - args is pointer to buffer having all arguments
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0
+ */
+struct smcinvoke_cmd_req {
+	__u32 op;
+	__u32 counts;
+	__s32 result;
+	__u32 argsize;
+	__u64 args;
+	__s64 reserved;
+};
+
+/*
+ * struct smcinvoke_accept: structure to process CB req from TEE
+ * @has_resp: IN: Whether IOCTL is carrying response data
+ * @result: IN: Outcome of operation op
+ * @op: OUT: Operation to be performed on target object
+ * @counts: OUT: Number of arguments, embedded in buffer pointed by
+ *               buf_addr, to complete operation
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0.
+ * @argsize: IN: Size of any argument, all of equal size, embedded
+ *               in buffer pointed by buf_addr
+ * @txn_id: OUT: An id that should be passed as it is for response
+ * @cbobj_id: OUT: Callback object which is target of operation op
+ * @buf_len: IN: Len of buffer pointed by buf_addr
+ * @buf_addr: IN: Buffer containing all arguments which are needed
+ *                to complete operation op
+ */
+struct smcinvoke_accept {
+	__u32 has_resp;
+	__s32 result;
+	__u32 op;
+	__u32 counts;
+	__s32 reserved;
+	__u32 argsize;
+	__u64 txn_id;
+	__s64 cbobj_id;
+	__u64 buf_len;
+	__u64 buf_addr;
+};
+
+/*
+ * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
+ * @reserved: IN/OUT: Usage is not defined but should be set to 0
+ */
+struct smcinvoke_server {
+	__u64 cb_buf_size;
+	__s64 reserved;
+};
+
+#define SMCINVOKE_IOC_MAGIC    0x98
+
+#define SMCINVOKE_IOCTL_INVOKE_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
+
+#define SMCINVOKE_IOCTL_ACCEPT_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
+
+#define SMCINVOKE_IOCTL_SERVER_REQ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
+
+#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
+	_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s64)
+
+/*
+ * The smcinvoke logging buffer is used to pass additional debug information
+ * to the smcinvoke driver, to be included in the driver's log (if any).
+ */
+#define SMCINVOKE_LOG_BUF_SIZE 100
+#define SMCINVOKE_IOCTL_LOG \
+	_IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE)
+
+#endif /* _UAPI_SMCINVOKE_H_ */
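
A hedged user-space sketch of the invoke path described above: one input buffer argument is marshaled into the args array and handed to the object fd. The op value (0) is only a placeholder for whatever the target interface defines, and the counts value here simply encodes "one buffer-in, nothing else":

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/smcinvoke.h>

static int demo_invoke_one_buf_in(int obj_fd, void *in_buf, uint64_t in_len)
{
	union smcinvoke_arg args[1];
	struct smcinvoke_cmd_req req = {0};

	args[0].b.addr = (uint64_t)(uintptr_t)in_buf;
	args[0].b.size = in_len;

	req.op      = 0;			/* interface-defined operation id */
	req.counts  = 1;			/* 1 buffer-in, 0 buffers-out, 0 objects */
	req.argsize = sizeof(union smcinvoke_arg);
	req.args    = (uint64_t)(uintptr_t)args;

	return ioctl(obj_fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req);
}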

+ 48 - 0
qcom/opensource/securemsm-kernel/linux/misc/qseecom_kernel.h

@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
+
+/*
+ * struct qseecom_handle -
+ *      Handle to the qseecom device for kernel clients
+ * @sbuf - shared buffer pointer
+ * @sbbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+#if IS_ENABLED(CONFIG_QSEECOM)
+int qseecom_process_listener_from_smcinvoke(uint32_t *result,
+					u64 *response_type, unsigned int *data);
+#else
+static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
+					u64 *response_type, unsigned int *data)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+
+#endif /* __QSEECOM_KERNEL_H_ */
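
A hedged kernel-side sketch of the client API declared above: start the TA, exchange one command/response pair in the shared buffer, then shut it down. The app name, buffer sizes and request layout are placeholders:

static int demo_qseecom_roundtrip(void)
{
	struct qseecom_handle *handle = NULL;
	uint32_t cmd_len = 64, rsp_len = 64;		/* placeholder sizes */
	void *cmd, *rsp;
	int rc;

	rc = qseecom_start_app(&handle, "smplap32", 4096);	/* placeholder TA */
	if (rc)
		return rc;

	cmd = handle->sbuf;				/* request at start of sbuf */
	rsp = handle->sbuf + QSEECOM_ALIGN(cmd_len);	/* response follows the aligned cmd */

	rc = qseecom_send_command(handle, cmd, QSEECOM_ALIGN(cmd_len),
				  rsp, QSEECOM_ALIGN(rsp_len));

	qseecom_shutdown_app(&handle);
	return rc;
}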

+ 26 - 0
qcom/opensource/securemsm-kernel/linux/misc/qseecom_priv.h

@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QSEECOM_PRIV_H_
+#define __QSEECOM_PRIV_H_
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_QSEECOM) || IS_ENABLED(CONFIG_ARCH_SA8155)
+
+int qseecom_process_listener_from_smcinvoke(uint32_t *result,
+                                        u64 *response_type, unsigned int *data);
+#else
+static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
+                                        u64 *response_type, unsigned int *data)
+{
+        return -EOPNOTSUPP;
+}
+
+int get_qseecom_kernel_fun_ops(void);
+#endif
+
+
+#endif

+ 740 - 0
qcom/opensource/securemsm-kernel/linux/misc/qseecomi.h

@@ -0,0 +1,740 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+/* we need to include qseecom.h present in securemsm-kernel */
+#include "../../include/uapi/linux/qseecom.h"
+
+#define QSEECOM_KEY_ID_SIZE   32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP               -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS            -66
+#define QSEOS_RESULT_FAIL_SAVE_KS             -67
+#define QSEOS_RESULT_FAIL_LOAD_KS             -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE     -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE          -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD      -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
+
+#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED	3
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
+	QSEOS_REGISTER_LOG_BUF_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
+	QSEOS_UPDATE_KEY_USERINFO,
+	QSEOS_TEE_OPEN_SESSION,
+	QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_CLOSE_SESSION,
+	QSEOS_TEE_REQUEST_CANCELLATION,
+	QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+	QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+	QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+	QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+	QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+	QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+	QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+	QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+	QSEOS_FSM_IKE_REQ_CMD = 0x203,
+	QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+	QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+	QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+	QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+	QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+	QSEOS_DIAG_FUSE_REQ_CMD = 0x501,
+	QSEOS_DIAG_FUSE_REQ_RSP_CMD = 0x502,
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_CBACK_REQUEST,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+	QSEOS_PIPE_ENC = 0x1,
+	QSEOS_PIPE_ENC_XTS = 0x2,
+	QSEOS_PIPE_AUTH = 0x4,
+	QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+	QSEE_REENTRANCY_PHASE_0 = 0,
+	QSEE_REENTRANCY_PHASE_1,
+	QSEE_REENTRANCY_PHASE_2,
+	QSEE_REENTRANCY_PHASE_3,
+	QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+} __packed;
+
+struct qsee_apps_region_info_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t addr;
+	uint32_t size;
+} __packed;
+
+struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+} __packed;
+
+struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name*/
+} __packed;
+
+struct qseecom_load_app_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+} __packed;
+
+struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+} __packed;
+
+struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+} __packed;
+
+struct qseecom_load_lib_image_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+} __packed;
+
+struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+} __packed;
+
+struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t sb_ptr;
+	uint32_t sb_len;
+} __packed;
+
+struct qseecom_register_listener_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint64_t sb_ptr;
+	uint32_t sb_len;
+} __packed;
+
+struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+} __packed;
+
+struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;/* First 4 bytes should be the return status */
+	uint32_t rsp_len;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __packed;
+
+struct qseecom_client_send_data_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint64_t req_ptr;
+	uint32_t req_len;
+	uint64_t rsp_ptr;
+	uint32_t rsp_len;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __packed;
+
+struct qseecom_reg_log_buf_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t phy_addr;
+	uint32_t len;
+} __packed;
+
+struct qseecom_reg_log_buf_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t phy_addr;
+	uint32_t len;
+} __packed;
+
+/* send_data resp */
+struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __packed;
+
+struct qseecom_client_listener_data_64bit_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+} __packed;
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: command result, a value from enum qseecom_qceos_cmd_status
+ * @resp_type: value from enum qseecom_command_scm_resp_type
+ * @data: identifier associated with @resp_type (app or listener id)
+ */
+struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+} __packed;
+
+struct qseecom_rpmb_provision_key {
+	uint32_t key_type;
+};
+
+struct qseecom_client_send_service_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type; /* in */
+	unsigned int req_len; /* in */
+	uint32_t rsp_ptr; /* in/out */
+	unsigned int rsp_len; /* in/out */
+} __packed;
+
+struct qseecom_client_send_service_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type;
+	unsigned int req_len;
+	uint64_t rsp_ptr;
+	unsigned int rsp_len;
+} __packed;
+
+struct qseecom_key_generate_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+} __packed;
+
+struct qseecom_key_select_ireq {
+	uint32_t qsee_command_id;
+	uint32_t ce;
+	uint32_t pipe;
+	uint32_t pipe_type;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+} __packed;
+
+struct qseecom_key_delete_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+
+} __packed;
+
+struct qseecom_key_userinfo_update_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t current_hash32[QSEECOM_HASH_SIZE];
+	uint8_t new_hash32[QSEECOM_HASH_SIZE];
+} __packed;
+
+struct qseecom_key_max_count_query_ireq {
+	uint32_t flags;
+} __packed;
+
+struct qseecom_key_max_count_query_irsp {
+	uint32_t max_key_count;
+} __packed;
+
+struct qseecom_qteec_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint32_t    req_ptr;
+	uint32_t    req_len;
+	uint32_t    resp_ptr;
+	uint32_t    resp_len;
+	uint32_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+} __packed;
+
+struct qseecom_qteec_64bit_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint64_t    req_ptr;
+	uint32_t    req_len;
+	uint64_t    resp_ptr;
+	uint32_t    resp_len;
+	uint64_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+} __packed;
+
+struct qseecom_client_send_fsm_diag_req {
+	uint32_t qsee_cmd_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;
+	uint32_t rsp_len;
+} __packed;
+
+struct qseecom_continue_blocked_request_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
+} __packed;
+
+/**********      ARMV8 SMC INTERFACE TZ MACRO     *******************/
+
+#define TZ_SVC_APP_MGR                   1     /* Application management */
+#define TZ_SVC_LISTENER                  2     /* Listener service management */
+#define TZ_SVC_EXTERNAL                  3     /* External image loading */
+#define TZ_SVC_RPMB                      4     /* RPMB */
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+#define TZ_SVC_FUSE                      8     /* Fuse services */
+#define TZ_SVC_ES                        16    /* Enterprise Security */
+#define TZ_SVC_MDTP                      18    /* Mobile Device Theft */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * ---------------------------------------------------------------------------
+ */
+#define TZ_OWNER_ARM                     0     /** ARM Architecture call ID */
+#define TZ_OWNER_CPU                     1     /** CPU service call ID */
+#define TZ_OWNER_SIP                     2     /** SIP service call ID */
+#define TZ_OWNER_OEM                     3     /** OEM service call ID */
+#define TZ_OWNER_STD                     4     /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS                 48
+#define TZ_OWNER_TZ_APPS_RESERVED        49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_OWNER_MOBI_OS                 51
+#define TZ_OWNER_OS_RESERVED_3           52
+#define TZ_OWNER_OS_RESERVED_4           53
+#define TZ_OWNER_OS_RESERVED_5           54
+#define TZ_OWNER_OS_RESERVED_6           55
+#define TZ_OWNER_OS_RESERVED_7           56
+#define TZ_OWNER_OS_RESERVED_8           57
+#define TZ_OWNER_OS_RESERVED_9           58
+#define TZ_OWNER_OS_RESERVED_10          59
+#define TZ_OWNER_OS_RESERVED_11          60
+#define TZ_OWNER_OS_RESERVED_12          61
+#define TZ_OWNER_OS_RESERVED_13          62
+#define TZ_OWNER_OS_RESERVED_14          63
+
+#define TZ_SVC_INFO                      6    /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER        0    /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l)     ((0xffffffff >> (32 - ((h - l) + 1))) << l)
+
+/*
+ * Macro used to define an SMC ID based on the owner ID,
+ * service ID, and function number.
+ */
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
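+/*
+ * Worked example (illustration only): TZ_OS_APP_START_ID defined below
+ * expands to TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+ *	= ((50 & 0x3f) << 24) | ((1 & 0xff) << 8) | (0x01 & 0xff)
+ *	= 0x32000101
+ */
+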
+#define TZ_SYSCALL_PARAM_NARGS_MASK  TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK   TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+	p4, p5, p6, p7, p8, p9, p10) \
+	((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+	((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+	((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+	((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+	((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+	((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+	((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+	((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+	((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+	((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+	((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
+
+/*
+ * Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+	TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+	TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+	TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+	TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+	TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+	TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+	TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+	TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+	TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+	TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
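+/*
+ * Worked example (illustration only): TZ_OS_APP_LOOKUP_ID_PARAM_ID defined
+ * below is TZ_SYSCALL_CREATE_PARAM_ID_2(BUF_RW, VAL)
+ *	= (2 & 0xf) + ((0x2 & 0x3) << 4) + ((0x0 & 0x3) << 6)
+ *	= 0x22
+ * i.e. two arguments: a read/write buffer followed by a plain value.
+ */
+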
+/*
+ * Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID)        CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0)   ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+			(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
+
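+/*
+ * The 50..63 range checked above corresponds to TZ_OWNER_QSEE_OS through
+ * TZ_OWNER_OS_RESERVED_14, i.e. every owner ID reserved for a trusted OS.
+ */
+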
+#define TZ_SYSCALL_PARAM_TYPE_VAL              0x0     /* type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO           0x1     /* type of buffer RO */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW           0x2     /* type of buffer RW */
+
+#define TZ_OS_APP_START_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_SHUTDOWN_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_SECBOOT_GET_FUSE_INFO \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_FUSE, 0x09)
+
+#define TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4(\
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID				\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID			\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_7( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */

+ 493 - 0
qcom/opensource/securemsm-kernel/qrng/msm_rng.c

@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2013, 2015, 2017-2021 The Linux Foundation. All rights
+ * reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/qrng.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <crypto/internal/rng.h>
+#include <linux/interconnect.h>
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+
+#define DRIVER_NAME "msm_rng"
+
+/* Device specific register offsets */
+#define PRNG_DATA_OUT_OFFSET    0x0000
+#define PRNG_STATUS_OFFSET	0x0004
+#define PRNG_LFSR_CFG_OFFSET	0x0100
+#define PRNG_CONFIG_OFFSET	0x0104
+
+/* Device specific register masks and config values */
+#define PRNG_LFSR_CFG_MASK	0xFFFF0000
+#define PRNG_LFSR_CFG_CLOCKS	0x0000DDDD
+#define PRNG_CONFIG_MASK	0xFFFFFFFD
+#define PRNG_HW_ENABLE		0x00000002
+
+#define MAX_HW_FIFO_DEPTH 16                     /* FIFO is 16 words deep */
+#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide  */
+
+#define RETRY_MAX_CNT		5	/* max retry times to read register */
+#define RETRY_DELAY_INTERVAL	440	/* retry delay interval in us */
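+/* worst case per read: RETRY_MAX_CNT x RETRY_DELAY_INTERVAL, about 2.2 ms */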
+
+struct msm_rng_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	struct clk *prng_clk;
+	struct mutex rng_lock;
+	struct icc_path *icc_path;
+};
+
+static struct msm_rng_device msm_rng_device_info;
+static struct msm_rng_device *msm_rng_dev_cached;
+static struct mutex cached_rng_lock;
+static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
+		pr_debug("calling msm_rng_bus_scale(LOW)\n");
+		ret = icc_set_bw(msm_rng_device_info.icc_path, 0, 0);
+		if (ret)
+			pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
+		break;
+	default:
+		pr_err("Unsupported IOCTL call\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * This function reads from the hardware random bit generator directly and
+ * returns the data to the caller.
+ */
+static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
+					void *data, size_t max)
+{
+	struct platform_device *pdev;
+	void __iomem *base;
+	size_t currsize = 0;
+	u32 val = 0;
+	u32 *retdata = data;
+	int ret;
+	int failed = 0;
+
+	pdev = msm_rng_dev->pdev;
+	base = msm_rng_dev->base;
+
+	/* no room for word data */
+	if (max < 4)
+		return 0;
+
+	mutex_lock(&msm_rng_dev->rng_lock);
+
+	if (msm_rng_dev->icc_path) {
+		ret = icc_set_bw(msm_rng_dev->icc_path, 0, 300000);
+		if (ret) {
+			pr_err("bus_scale_client_update_req failed\n");
+			goto bus_err;
+		}
+	}
+	/* enable PRNG clock */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			pr_err("failed to enable prng clock\n");
+			goto err;
+		}
+	}
+	/* read random data from h/w */
+	do {
+		/* check status bit if data is available */
+		if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
+				& 0x00000001)) {
+			if (failed++ == RETRY_MAX_CNT) {
+				if (currsize == 0)
+					pr_err("Data not available\n");
+				break;
+			}
+			udelay(RETRY_DELAY_INTERVAL);
+		} else {
+
+			/* read FIFO */
+			val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
+
+			/* write data back to the caller's pointer */
+			*(retdata++) = val;
+			currsize += 4;
+			/* make sure we stay on 32bit boundary */
+			if ((max - currsize) < 4)
+				break;
+		}
+
+	} while (currsize < max);
+
+	/* vote to turn off clock */
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+err:
+	if (msm_rng_dev->icc_path) {
+		ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+bus_err:
+	mutex_unlock(&msm_rng_dev->rng_lock);
+
+	val = 0L;
+	return currsize;
+}
+static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	struct msm_rng_device *msm_rng_dev;
+	int rv = 0;
+
+	msm_rng_dev = (struct msm_rng_device *)rng->priv;
+	rv = msm_rng_direct_read(msm_rng_dev, data, max);
+
+	return rv;
+}
+
+
+static struct hwrng msm_rng = {
+	.name = DRIVER_NAME,
+	.read = msm_rng_read,
+	.quality = 1024,
+};
+
+static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
+{
+	unsigned long val = 0;
+	unsigned long reg_val = 0;
+	int ret = 0;
+
+	if (msm_rng_dev->icc_path) {
+		ret = icc_set_bw(msm_rng_dev->icc_path, 0, 30000);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+	/* Enable the PRNG CLK */
+	if (msm_rng_dev->prng_clk) {
+		ret = clk_prepare_enable(msm_rng_dev->prng_clk);
+		if (ret) {
+			dev_err(&(msm_rng_dev->pdev)->dev,
+				"failed to enable clock in probe\n");
+			return -EPERM;
+		}
+	}
+
+	/* Enable PRNG h/w only if it is NOT ON */
+	val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
+					PRNG_HW_ENABLE;
+	/* PRNG H/W is not ON */
+	if (val != PRNG_HW_ENABLE) {
+		val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+		val &= PRNG_LFSR_CFG_MASK;
+		val |= PRNG_LFSR_CFG_CLOCKS;
+		writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
+
+		/* Ensure the LFSR_CFG write completes before writing CONFIG */
+		mb();
+
+		reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
+						& PRNG_CONFIG_MASK;
+		reg_val |= PRNG_HW_ENABLE;
+		writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
+
+		/* The PRNG clk should be disabled only after we enable the
+		 * PRNG h/w by writing to the PRNG CONFIG register.
+		 */
+		mb();
+	}
+	if (msm_rng_dev->prng_clk)
+		clk_disable_unprepare(msm_rng_dev->prng_clk);
+
+	if (msm_rng_dev->icc_path) {
+		ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0);
+		if (ret)
+			pr_err("bus_scale_client_update_req failed\n");
+	}
+
+	return 0;
+}
+
+static const struct file_operations msm_rng_fops = {
+	.unlocked_ioctl = msm_rng_ioctl,
+};
+static struct class *msm_rng_class;
+static struct cdev msm_rng_cdev;
+
+static int msm_rng_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct msm_rng_device *msm_rng_dev = NULL;
+	void __iomem *base = NULL;
+	bool configure_qrng = true;
+	int error = 0;
+	struct device *dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "invalid address\n");
+		error = -EFAULT;
+		goto err_exit;
+	}
+
+	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
+	if (!msm_rng_dev) {
+		error = -ENOMEM;
+		goto err_exit;
+	}
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		error = -ENOMEM;
+		goto err_iomap;
+	}
+	msm_rng_dev->base = base;
+
+	/* create a handle for clock control */
+	if (pdev->dev.of_node) {
+		if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-clock-support"))
+			msm_rng_dev->prng_clk = NULL;
+		else
+			msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							"km_clk_src");
+	}
+
+	if (IS_ERR(msm_rng_dev->prng_clk)) {
+		dev_err(&pdev->dev, "failed to register clock source\n");
+		error = -ENODEV;
+		goto err_clk_get;
+	}
+
+	/* save away pdev and register driver data */
+	msm_rng_dev->pdev = pdev;
+	platform_set_drvdata(pdev, msm_rng_dev);
+
+	if (pdev->dev.of_node) {
+		msm_rng_dev->icc_path = of_icc_get(&pdev->dev, "data_path");
+		msm_rng_device_info.icc_path = msm_rng_dev->icc_path;
+		if (IS_ERR(msm_rng_dev->icc_path)) {
+			error = PTR_ERR(msm_rng_dev->icc_path);
+			dev_err(&pdev->dev, "get icc path err %d\n", error);
+			goto err_icc_get;
+		}
+	}
+
+	/* Enable rng h/w for the targets which can access the entire
+	 * address space of PRNG.
+	 */
+	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-qrng-config")))
+		configure_qrng = false;
+	if (configure_qrng) {
+		error = msm_rng_enable_hw(msm_rng_dev);
+		if (error)
+			goto err_icc_get;
+	}
+
+	mutex_init(&msm_rng_dev->rng_lock);
+	mutex_init(&cached_rng_lock);
+
+	/* register with hwrng framework */
+	msm_rng.priv = (unsigned long) msm_rng_dev;
+	error = hwrng_register(&msm_rng);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register hwrng\n");
+		goto err_reg_hwrng;
+	}
+	error = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register chrdev\n");
+		goto err_reg_chrdev;
+	}
+
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	msm_rng_class = class_create("msm-rng");
+#else
+	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
+#endif
+	if (IS_ERR(msm_rng_class)) {
+		pr_err("class_create failed\n");
+		error = PTR_ERR(msm_rng_class);
+		goto err_create_cls;
+	}
+
+	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
+				NULL, "msm-rng");
+	if (IS_ERR(dev)) {
+		pr_err("Device create failed\n");
+		error = PTR_ERR(dev);
+		goto err_create_dev;
+	}
+	cdev_init(&msm_rng_cdev, &msm_rng_fops);
+	msm_rng_dev_cached = msm_rng_dev;
+	return error;
+
+err_create_dev:
+	class_destroy(msm_rng_class);
+err_create_cls:
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+err_reg_chrdev:
+	hwrng_unregister(&msm_rng);
+err_reg_hwrng:
+	if (msm_rng_dev->icc_path)
+		icc_put(msm_rng_dev->icc_path);
+err_icc_get:
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+err_clk_get:
+	iounmap(msm_rng_dev->base);
+err_iomap:
+	kfree_sensitive(msm_rng_dev);
+err_exit:
+	return error;
+}
+
+static int msm_rng_remove(struct platform_device *pdev)
+{
+	struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
+
+	cdev_del(&msm_rng_cdev);
+	device_destroy(msm_rng_class, MKDEV(QRNG_IOC_MAGIC, 0));
+	class_destroy(msm_rng_class);
+	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
+	hwrng_unregister(&msm_rng);
+	if (msm_rng_dev->prng_clk)
+		clk_put(msm_rng_dev->prng_clk);
+	iounmap(msm_rng_dev->base);
+	platform_set_drvdata(pdev, NULL);
+	if (msm_rng_dev->icc_path)
+		icc_put(msm_rng_dev->icc_path);
+
+	kfree_sensitive(msm_rng_dev);
+	msm_rng_dev_cached = NULL;
+	return 0;
+}
+
+static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
+				unsigned int slen, u8 *rdata,
+				unsigned int dlen)
+{
+	int sizeread = 0;
+	int rv = -EFAULT;
+
+	if (!msm_rng_dev_cached) {
+		pr_err("%s: msm_rng_dev is not initialized\n", __func__);
+		rv = -ENODEV;
+		goto err_exit;
+	}
+
+	if (!rdata) {
+		pr_err("%s: data buffer is null\n", __func__);
+		rv = -EINVAL;
+		goto err_exit;
+	}
+
+	if (signal_pending(current) ||
+		mutex_lock_interruptible(&cached_rng_lock)) {
+		pr_err("%s: mutex lock interrupted\n", __func__);
+		rv = -ERESTARTSYS;
+		goto err_exit;
+	}
+	sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
+
+	if (sizeread == dlen)
+		rv = 0;
+
+	mutex_unlock(&cached_rng_lock);
+err_exit:
+	return rv;
+
+}
+
+static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+	return 0;
+}
+
+static struct rng_alg rng_algs[] = { {
+	.generate	= qrng_get_random,
+	.seed		= qrng_reset,
+	.seedsize	= 0,
+	.base		= {
+		.cra_name		= "qrng",
+		.cra_driver_name	= "fips_hw_qrng",
+		.cra_priority		= 300,
+		.cra_ctxsize		= 0,
+		.cra_module		= THIS_MODULE,
+	}
+} };
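+
+/*
+ * Minimal usage sketch (illustration only, not code used by this driver):
+ * once these algs are registered, other kernel code could draw bytes from
+ * this RNG through the standard crypto API, e.g.:
+ *
+ *	struct crypto_rng *rng = crypto_alloc_rng("qrng", 0, 0);
+ *
+ *	if (!IS_ERR(rng)) {
+ *		u8 buf[32];
+ *
+ *		if (!crypto_rng_get_bytes(rng, buf, sizeof(buf)))
+ *			pr_debug("got %zu random bytes\n", sizeof(buf));
+ *		crypto_free_rng(rng);
+ *	}
+ */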
+
+static const struct of_device_id qrng_match[] = {
+	{.compatible = "qcom,msm-rng"},
+	{},
+};
+
+static struct platform_driver rng_driver = {
+	.probe      = msm_rng_probe,
+	.remove     = msm_rng_remove,
+	.driver     = {
+		.name   = DRIVER_NAME,
+		.of_match_table = qrng_match,
+	},
+};
+
+static int __init msm_rng_init(void)
+{
+	int ret;
+
+	msm_rng_dev_cached = NULL;
+	ret = platform_driver_register(&rng_driver);
+	if (ret) {
+		pr_err("%s: platform_driver_register error:%d\n",
+			__func__, ret);
+		goto err_exit;
+	}
+	ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	if (ret) {
+		pr_err("%s: crypto_register_rngs error:%d\n",
+			__func__, ret);
+		platform_driver_unregister(&rng_driver);
+		goto err_exit;
+	}
+
+err_exit:
+	return ret;
+}
+
+module_init(msm_rng_init);
+
+static void __exit msm_rng_exit(void)
+{
+	crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
+	platform_driver_unregister(&rng_driver);
+}
+
+module_exit(msm_rng_exit);
+
+MODULE_DESCRIPTION("QTI MSM Random Number Driver");
+MODULE_LICENSE("GPL v2");

+ 145 - 0
qcom/opensource/securemsm-kernel/qseecom/ice.h

@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+	ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+	ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
+
+enum ice_crpto_key_size {
+	ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+	ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+	ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+	ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+	ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
+};
+
+#define QCOM_ICE_TYPE_NAME_LEN 8
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	int saved_vote;
+	bool is_max_bw_needed;
+	struct device_attribute max_bus_bw;
+};
+
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+	struct list_head	list;
+	struct device		*pdev;
+	struct cdev		cdev;
+	dev_t			device_no;
+	struct class		*driver_class;
+	void __iomem		*mmio;
+	struct resource		*res;
+	int			irq;
+	bool			is_ice_enabled;
+	bool			is_ice_disable_fuse_blown;
+	ice_error_cb		error_cb;
+	void			*host_controller_data; /* UFS/EMMC/other? */
+	struct list_head	clk_list_head;
+	u32			ice_hw_version;
+	bool			is_ice_clk_available;
+	char			ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+	struct regulator	*reg;
+	bool			is_regulator_available;
+	struct qcom_ice_bus_vote bus_vote;
+	ktime_t			ice_reset_start_time;
+	ktime_t			ice_reset_complete_time;
+	void             *key_table;
+};
+
+struct ice_crypto_setting {
+	enum ice_crpto_key_size		key_size;
+	enum ice_cryto_algo_mode	algo_mode;
+	enum ice_crpto_key_mode		key_mode;
+	short				key_index;
+
+};
+
+struct ice_data_setting {
+	struct ice_crypto_setting	crypto_data;
+	bool				sw_forced_context_switch;
+	bool				decr_bypass;
+	bool				encr_bypass;
+};
+
+/* MSM ICE crypto data unit sizes (DUN granularity) for a transfer request */
+enum ice_crypto_data_unit {
+	ICE_CRYPTO_DATA_UNIT_512_B          = 0,
+	ICE_CRYPTO_DATA_UNIT_1_KB           = 1,
+	ICE_CRYPTO_DATA_UNIT_2_KB           = 2,
+	ICE_CRYPTO_DATA_UNIT_4_KB           = 3,
+	ICE_CRYPTO_DATA_UNIT_8_KB           = 4,
+	ICE_CRYPTO_DATA_UNIT_16_KB          = 5,
+	ICE_CRYPTO_DATA_UNIT_32_KB          = 6,
+	ICE_CRYPTO_DATA_UNIT_64_KB          = 7,
+};
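+/* Encoded as log2(data unit / 512 B): the unit size is 512 bytes << value. */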
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
+#if IS_ENABLED(CONFIG_CYRPTO_DEV_QCOM_ICE)
+int enable_ice_setup(struct ice_device *ice_dev);
+int disable_ice_setup(struct ice_device *ice_dev);
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
+void qcom_ice_set_fde_flag(int flag);
+struct list_head *get_ice_dev_list(void);
+#else
+static inline int enable_ice_setup(struct ice_device *ice_dev)
+{
+	return 0;
+}
+static inline int disable_ice_setup(struct ice_device *ice_dev)
+{
+	return 0;
+}
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	return 0;
+}
+static inline void qcom_ice_set_fde_flag(int flag) {}
+static inline struct list_head *get_ice_dev_list(void)
+{
+	return NULL;
+}
+#endif
+
+struct qcom_ice_variant_ops {
+	const char *name;
+	int	(*init)(struct platform_device *device_init, void *init_data,
+				ice_error_cb err);
+	int	(*reset)(struct platform_device *device_reset);
+	int	(*resume)(struct platform_device *device_resume);
+	int	(*suspend)(struct platform_device *device_suspend);
+	int	(*config_start)(struct platform_device *device_start,
+			struct request *req, struct ice_data_setting *setting,
+			bool start);
+	int	(*config_end)(struct platform_device *pdev,
+			struct request *req);
+	int	(*status)(struct platform_device *device_status);
+	void	(*debug)(struct platform_device *device_debug);
+};
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */

+ 9885 - 0
qcom/opensource/securemsm-kernel/qseecom/qseecom.c

@@ -0,0 +1,9885 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/qseecom_scm.h>
+#include <asm/cacheflush.h>
+#include <linux/delay.h>
+#include <linux/signal.h>
+#include <linux/compat.h>
+#include <linux/kthread.h>
+#include <linux/dma-map-ops.h>
+#include <linux/cma.h>
+#include <linux/of_platform.h>
+#include <linux/interconnect.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/qtee_shmbridge.h>
+#include <linux/mem-buf.h>
+#include <linux/version.h>
+#include "linux/qseecom_api.h"
+#include "ice.h"
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+#include <linux/qseecom_kernel.h>
+#include "misc/qseecom_priv.h"
+#else
+#include "misc/qseecom_kernel.h"
+#endif
+#include "misc/qseecomi.h"
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
+#define KERNEL_VERSION_LEGACY
+#endif
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_14		0x14
+#define QSEEE_VERSION_00		0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+#define QSEE_VERSION_03			0x403000
+#define QSEE_VERSION_04			0x404000
+#define QSEE_VERSION_05			0x405000
+#define QSEE_VERSION_20			0x800000
+#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ		100000000
+#define CE_CLK_DIV			1000000
+
+#define QSEECOM_MAX_SG_ENTRY			4096
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
+			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID  0xff
+
+/* Save partition image hash for authentication check */
+#define SCM_SAVE_PARTITION_HASH_ID	0x01
+
+/* Check if enterprise security is activated */
+#define SCM_IS_ACTIVATED_ID		0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP		0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP			0x20000
+
+#define RPMB_SERVICE			0x2000
+#define SSD_SERVICE			0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G	(1ULL<<32)
+
+#define QSEECOM_STATE_NOT_READY         0
+#define QSEECOM_STATE_SUSPEND           1
+#define QSEECOM_STATE_READY             2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2
+
+/*
+ * Default CE info unit is 0 for services that support only a single
+ * instance; most services fall into this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+#define FDE_FLAG_POS    4
+#define ENABLE_KEY_WRAP_IN_KS    (1 << FDE_FLAG_POS)
+
+/*
+ * sg list buf format version
+ * 1: Legacy format to support only 512 SG list entries
+ * 2: new format to support > 512 entries
+ */
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1    1
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2    2
+
+struct qseecom_sg_list_buf_hdr_64bit {
+	struct qseecom_sg_entry_64bit  blank_entry;     /* must be all 0 */
+	__u32 version;          /* sg list buf format version */
+	__u64 new_buf_phys_addr;        /* PA of new buffer */
+	__u32 nents_total;              /* Total number of SG entries */
+} __packed;
+
+#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT        \
+			sizeof(struct qseecom_sg_list_buf_hdr_64bit)
+
+#define MAX_CE_PIPE_PAIR_PER_UNIT 3
+#define INVALID_CE_INFO_UNIT_NUM 0xffffffff
+
+#define CE_PIPE_PAIR_USE_TYPE_FDE 0
+#define CE_PIPE_PAIR_USE_TYPE_PFE 1
+
+#define SG_ENTRY_SZ             sizeof(struct qseecom_sg_entry)
+#define SG_ENTRY_SZ_64BIT       sizeof(struct qseecom_sg_entry_64bit)
+
+enum qseecom_bandwidth_request_mode {
+	INACTIVE = 0,
+	LOW,
+	MEDIUM,
+	HIGH,
+};
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
+enum qseecom_client_handle_type {
+	QSEECOM_CLIENT_APP = 1,
+	QSEECOM_LISTENER_SERVICE,
+	QSEECOM_SECURE_SERVICE,
+	QSEECOM_GENERIC,
+	QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+	CLK_QSEE = 0,
+	CLK_CE_DRV,
+	CLK_INVALID,
+};
+
+enum qseecom_cache_ops {
+	QSEECOM_CACHE_CLEAN,
+	QSEECOM_CACHE_INVALIDATE,
+};
+
+enum qseecom_listener_unregister_kthread_state {
+	LSNR_UNREG_KT_SLEEP = 0,
+	LSNR_UNREG_KT_WAKEUP,
+};
+
+enum qseecom_unload_app_kthread_state {
+	UNLOAD_APP_KT_SLEEP = 0,
+	UNLOAD_APP_KT_WAKEUP,
+};
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+static DEFINE_MUTEX(listener_access_lock);
+static DEFINE_MUTEX(unload_app_pending_list_lock);
+
+
+struct sglist_info {
+	uint32_t indexAndFlags;
+	uint32_t sizeOrCount;
+};
+
+/*
+ * The 31st bit indicates whether the request buffer holds a single physical
+ * address or several. If it is set, the index locates a single physical addr
+ * inside the request buffer, and `sizeOrCount` is the size of the memory being
+ * shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * The 30th bit indicates 64- or 32-bit addressing; when it is set, physical
+ * addresses and scatter/gather entry sizes are 64-bit values. Otherwise, they
+ * are 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
+	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
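+/*
+ * Example encoding (illustration only): a single 64-bit physical address
+ * referenced at offset 0x10 of the request buffer, sharing 0x1000 bytes:
+ *	indexAndFlags = SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x10) = 0xc0000010
+ *	sizeOrCount   = 0x1000
+ */
+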
+#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
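+/*
+ * For example, MAKE_WHITELIST_VERSION(1, 0, 0) evaluates to 0x00400000,
+ * i.e. major 1, minor 0, patch 0.
+ */
+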
+#define MAKE_NULL(sgt, attach, dmabuf) do {\
+				sgt = NULL;\
+				attach = NULL;\
+				dmabuf = NULL;\
+				} while (0)
+
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	void  *user_virt_sb_base;
+	struct dma_buf             *dmabuf;
+	struct dma_buf_attachment  *attach;
+	struct sg_table            *sgt;
+	u8                         *sb_virt;
+	phys_addr_t                sb_phys;
+	size_t                     sb_length;
+	wait_queue_head_t          rcv_req_wq;
+	/* rcv_req_flag: 0: ready and empty; 1: received req */
+	int                        rcv_req_flag;
+	int                        send_resp_flag;
+	bool                       listener_in_use;
+	/* wq for thread blocked on this listener*/
+	wait_queue_head_t          listener_block_app_wq;
+	struct sglist_info         *sglistinfo_ptr;
+	struct qtee_shm            sglistinfo_shm;
+	uint32_t                   sglist_cnt;
+	int                        abort;
+	bool                       unregister_pending;
+};
+
+struct qseecom_unregister_pending_list {
+	struct list_head		list;
+	struct qseecom_dev_handle	*data;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	bool app_blocked;
+	u32  check_block;
+	u32  blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+	struct list_head list;
+	struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry *ce_pipe_entry;
+	bool alloc;
+	uint32_t type;
+};
+
+struct ce_hw_usage_info {
+	uint32_t qsee_ce_hw_instance;
+	uint32_t num_fde;
+	struct qseecom_ce_info_use *fde;
+	uint32_t num_pfe;
+	struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+	enum qseecom_ce_hw_instance instance;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+	struct clk *ce_bus_clk;
+	uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+	struct list_head  registered_listener_list_head;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	struct list_head   registered_kclient_list_head;
+	spinlock_t        registered_kclient_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+	uint32_t          qsee_version;
+	struct device *pdev;        /* class_dev */
+	struct device *dev;         /* platform_dev->dev */
+	struct class *driver_class;
+	dev_t qseecom_device_no;
+
+	bool  whitelist_support;
+	bool  commonlib_loaded;
+	bool  commonlib64_loaded;
+	bool  commonlib_loaded_by_hostvm;
+	struct ce_hw_usage_info ce_info;
+
+	int qsee_bw_count;
+	int qsee_sfpb_bw_count;
+
+	uint32_t qsee_perf_client;
+	struct icc_path *icc_path;
+	uint32_t avg_bw;
+	uint32_t peak_bw;
+	struct qseecom_clk qsee;
+	struct qseecom_clk ce_drv;
+
+	bool support_bus_scaling;
+	bool support_fde;
+	bool support_pfe;
+	bool fde_key_size;
+	uint32_t  cumulative_mode;
+	enum qseecom_bandwidth_request_mode  current_mode;
+	struct timer_list bw_scale_down_timer;
+	struct work_struct bw_inactive_req_ws;
+	struct cdev cdev;
+	bool timer_running;
+	bool no_clock_support;
+	unsigned int ce_opp_freq_hz;
+	bool appsbl_qseecom_support;
+	uint32_t qsee_reentrancy_support;
+	bool enable_key_wrap_in_ks;
+
+	uint32_t app_block_ref_cnt;
+	wait_queue_head_t app_block_wq;
+	atomic_t qseecom_state;
+	int is_apps_region_protected;
+	bool smcinvoke_support;
+	uint64_t qseecom_bridge_handle;
+	uint64_t ta_bridge_handle;
+	uint64_t user_contig_bridge_handle;
+
+	struct list_head  unregister_lsnr_pending_list_head;
+	wait_queue_head_t register_lsnr_pending_wq;
+	struct task_struct *unregister_lsnr_kthread_task;
+	wait_queue_head_t unregister_lsnr_kthread_wq;
+	atomic_t unregister_lsnr_kthread_state;
+
+	struct list_head  unload_app_pending_list_head;
+	struct task_struct *unload_app_kthread_task;
+	struct notifier_block reboot_nb;
+	wait_queue_head_t unload_app_kthread_wq;
+	atomic_t unload_app_kthread_state;
+	bool no_user_contig_mem_support;
+};
+
+struct qseecom_unload_app_pending_list {
+	struct list_head		list;
+	struct qseecom_dev_handle	*data;
+};
+
+struct qseecom_sec_buf_fd_info {
+	bool is_sec_buf_fd;
+	size_t size;
+	void *vbase;
+	phys_addr_t pbase;
+	struct qtee_shm shm;
+};
+
+struct qseecom_param_memref {
+	uint32_t buffer;
+	uint32_t size;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	struct dma_buf *dmabuf;
+	struct dma_buf_attachment  *attach;
+	struct sg_table *sgt;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	size_t sb_length;
+	unsigned long user_virt_sb_base;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+	bool from_smcinvoke;
+	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
+	bool unload_pending;
+	bool from_loadapp;
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+	bool              register_pending;
+	bool              release_called;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	enum qseecom_client_handle_type type;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+	bool  perf_enabled;
+	bool  fast_load_enabled;
+	enum qseecom_bandwidth_request_mode mode;
+	struct sglist_info *sglistinfo_ptr;
+	struct qtee_shm sglistinfo_shm;
+	uint32_t sglist_cnt;
+	bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+	uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+	unsigned int unit_num;
+	unsigned int ce;
+	unsigned int pipe_pair;
+};
+
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+	{
+		.desc = "Undefined Usage Index",
+	},
+
+	{
+		.desc = "Full Disk Encryption",
+	},
+
+	{
+		.desc = "Per File Encryption",
+	},
+
+	{
+		.desc = "UFS ICE Full Disk Encryption",
+	},
+
+	{
+		.desc = "SDCC ICE Full Disk Encryption",
+	},
+};
+
+/* Function proto types */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id);
+
+static int __maybe_unused get_qseecom_keymaster_status(char *str)
+{
+	get_option(&str, &qseecom.is_apps_region_protected);
+	return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
+
+static int __qseecom_alloc_coherent_buf(
+			uint32_t size, u8 **vaddr, phys_addr_t *paddr);
+static void __qseecom_free_coherent_buf(uint32_t size,
+				u8 *vaddr, phys_addr_t paddr);
+
+#define QSEECOM_SCM_EBUSY_WAIT_MS 30
+#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
+#define QSEE_RESULT_FAIL_APP_BUSY 315
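+
+/*
+ * With a 30 ms sleep per retry, the 67-attempt budget above gives the secure
+ * world roughly 2 s to recover before __qseecom_scm_call2_locked() gives up;
+ * the warning inside that loop fires after about 1 s (33 retries).
+ */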
+
+static int __qseecom_scm_call2_locked(uint32_t smc_id, struct qseecom_scm_desc *desc)
+{
+	int ret = 0;
+	int retry_count = 0;
+
+	do {
+		ret = qcom_scm_qseecom_call(smc_id, desc, false);
+		if ((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) {
+			mutex_unlock(&app_access_lock);
+			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
+			mutex_lock(&app_access_lock);
+		}
+		if (retry_count == 33)
+			pr_warn("secure world has been busy for 1 second!\n");
+	} while (((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) &&
+			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
+	return ret;
+}
+
+static char *__qseecom_alloc_tzbuf(uint32_t size,
+				phys_addr_t *pa, struct qtee_shm *shm)
+{
+	char *tzbuf = NULL;
+	int ret = qtee_shmbridge_allocate_shm(size, shm);
+
+	if (ret)
+		return NULL;
+	tzbuf = shm->vaddr;
+	memset(tzbuf, 0, size);
+	*pa = shm->paddr;
+	return tzbuf;
+}
+
+static void __qseecom_free_tzbuf(struct qtee_shm *shm)
+{
+	qtee_shmbridge_free_shm(shm);
+}
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+			const void *req_buf, void *resp_buf)
+{
+	int      ret = 0;
+	uint32_t smc_id = 0;
+	uint32_t qseos_cmd_id = 0;
+	struct qseecom_scm_desc desc = {0};
+	struct qseecom_command_scm_resp *scm_resp = NULL;
+	struct qtee_shm shm = {0};
+	phys_addr_t pa;
+
+	if (!req_buf || !resp_buf) {
+		pr_err("Invalid buffer pointer\n");
+		return -EINVAL;
+	}
+	qseos_cmd_id = *(uint32_t *)req_buf;
+	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+	switch (svc_id) {
+	case SCM_SVC_INFO: {
+		if (tz_cmd_id == 3) {
+			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+			desc.args[0] = *(uint32_t *)req_buf;
+		} else {
+			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+				svc_id, tz_cmd_id);
+			return -EINVAL;
+		}
+		ret = __qseecom_scm_call2_locked(smc_id, &desc);
+		break;
+	}
+	case SCM_SVC_ES: {
+		switch (tz_cmd_id) {
+		case SCM_SAVE_PARTITION_HASH_ID: {
+			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+			struct qseecom_save_partition_hash_req *p_hash_req =
+				(struct qseecom_save_partition_hash_req *)
+				req_buf;
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, p_hash_req->digest,
+				SHA256_DIGEST_LENGTH);
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+			desc.args[0] = p_hash_req->partition_id;
+			desc.args[1] = pa;
+			desc.args[2] = SHA256_DIGEST_LENGTH;
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		default: {
+			pr_err("tz_cmd_id %d is not supported\n", tz_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (tz_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_ES */
+	case SCM_SVC_TZSCHEDULER: {
+		switch (qseos_cmd_id) {
+		case QSEOS_APP_START_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_START_ID;
+			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+					(struct qseecom_load_app_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_SHUTDOWN_COMMAND: {
+			struct qseecom_unload_app_ireq *req;
+
+			req = (struct qseecom_unload_app_ireq *)req_buf;
+			smc_id = TZ_OS_APP_SHUTDOWN_ID;
+			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = qcom_scm_qseecom_call(smc_id, &desc, true);
+			break;
+		}
+		case QSEOS_APP_LOOKUP_COMMAND: {
+			struct qseecom_check_app_ireq *req;
+			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			req = (struct qseecom_check_app_ireq *)req_buf;
+			pr_debug("Lookup app_name = %s\n", req->app_name);
+			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_OS_APP_LOOKUP_ID;
+			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+			desc.args[0] = pa;
+			desc.args[1] = strlen(req->app_name);
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		case QSEOS_APP_REGION_NOTIFICATION: {
+			struct qsee_apps_region_info_ireq *req;
+			struct qsee_apps_region_info_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+			desc.arginfo =
+				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qsee_apps_region_info_ireq *)
+					req_buf;
+				desc.args[0] = req->addr;
+				desc.args[1] = req->size;
+			} else {
+				req_64bit =
+				(struct qsee_apps_region_info_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->addr;
+				desc.args[1] = req_64bit->size;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+			struct qseecom_load_lib_image_ireq *req;
+			struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_lib_image_ireq *)
+					req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_lib_image_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_REGISTER_LISTENER: {
+			struct qseecom_register_listener_ireq *req;
+			struct qseecom_register_listener_64bit_ireq *req_64bit;
+
+			desc.arginfo =
+				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_register_listener_ireq *)
+					req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->sb_ptr;
+				desc.args[2] = req->sb_len;
+			} else {
+				req_64bit =
+				(struct qseecom_register_listener_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->listener_id;
+				desc.args[1] = req_64bit->sb_ptr;
+				desc.args[2] = req_64bit->sb_len;
+			}
+			qseecom.smcinvoke_support = true;
+			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			if (ret == -EIO) {
+				/* smcinvoke is not supported */
+				qseecom.smcinvoke_support = false;
+				smc_id = TZ_OS_REGISTER_LISTENER_ID;
+				ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			}
+			break;
+		}
+		case QSEOS_DEREGISTER_LISTENER: {
+			struct qseecom_unregister_listener_ireq *req;
+
+			req = (struct qseecom_unregister_listener_ireq *)
+				req_buf;
+			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+			struct qseecom_client_listener_data_irsp *req;
+
+			req = (struct qseecom_client_listener_data_irsp *)
+				req_buf;
+			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+			desc.arginfo =
+				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			desc.args[1] = req->status;
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+			struct qseecom_client_listener_data_irsp *req;
+			struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+			smc_id =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req =
+				(struct qseecom_client_listener_data_irsp *)
+				req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->status;
+				desc.args[2] = req->sglistinfo_ptr;
+				desc.args[3] = req->sglistinfo_len;
+			} else {
+				req_64 =
+			(struct qseecom_client_listener_data_64bit_irsp *)
+				req_buf;
+				desc.args[0] = req_64->listener_id;
+				desc.args[1] = req_64->status;
+				desc.args[2] = req_64->sglistinfo_ptr;
+				desc.args[3] = req_64->sglistinfo_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_app_64bit_ireq *)req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+
+		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+			struct qseecom_client_send_service_ireq *req;
+
+			req = (struct qseecom_client_send_service_ireq *)
+				req_buf;
+			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+			desc.args[0] = req->key_type;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_ERASE_COMMAND: {
+			smc_id = TZ_OS_RPMB_ERASE_ID;
+			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_DIAG_FUSE_REQ_CMD:
+		case QSEOS_DIAG_FUSE_REQ_RSP_CMD: {
+			struct qseecom_client_send_fsm_diag_req *req;
+
+			smc_id = TZ_SECBOOT_GET_FUSE_INFO;
+			desc.arginfo = TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID;
+
+			req = (struct qseecom_client_send_fsm_diag_req *) req_buf;
+			desc.args[0] = req->req_ptr;
+			desc.args[1] = req->req_len;
+			desc.args[2] = req->rsp_ptr;
+			desc.args[3] = req->rsp_len;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+
+		}
+		case QSEOS_GENERATE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t)));
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_OS_KS_GEN_KEY_ID;
+			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+			desc.args[0] = pa;
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		case QSEOS_DELETE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t)));
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_OS_KS_DEL_KEY_ID;
+			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+			desc.args[0] = pa;
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		case QSEOS_SET_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t)));
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+			desc.args[0] = pa;
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		case QSEOS_UPDATE_KEY_USERINFO: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = __qseecom_alloc_tzbuf(
+						tzbuflen, &pa, &shm);
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t)));
+			qtee_shmbridge_flush_shm_buf(&shm);
+			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+			desc.args[0] = pa;
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			__qseecom_free_tzbuf(&shm);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_CLOSE_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_REQUEST_CANCELLATION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+			desc.arginfo =
+				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+			struct qseecom_continue_blocked_request_ireq *req =
+				(struct qseecom_continue_blocked_request_ireq *)
+				req_buf;
+			if (qseecom.smcinvoke_support)
+				smc_id =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+			else
+				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+			desc.arginfo =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+			desc.args[0] = req->app_or_session_id;
+			ret = __qseecom_scm_call2_locked(smc_id, &desc);
+			break;
+		}
+		default: {
+			pr_err("qseos_cmd_id %d is not supported.\n",
+						qseos_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (qseos_cmd_id) */
+	break;
+	} /*end of case SCM_SVC_TZSCHEDULER*/
+	default: {
+		pr_err("svc_id 0x%x is not supported.\n", svc_id);
+		ret = -EINVAL;
+		break;
+	}
+	} /*end of switch svc_id */
+	scm_resp->result = desc.ret[0];
+	scm_resp->resp_type = desc.ret[1];
+	scm_resp->data = desc.ret[2];
+	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+		scm_resp->result, scm_resp->resp_type, scm_resp->data);
+	return ret;
+}
+
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		if (entry->svc.listener_id == listener_id)
+			break;
+	}
+	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+		pr_debug("Service id: %u is not found\n", listener_id);
+		return NULL;
+	}
+
+	return entry;
+}
+
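+/*
+ * CPU cache maintenance on a dma-buf: CLEAN performs an end/begin
+ * bidirectional access pair (clean and invalidate), INVALIDATE only
+ * begins a DMA_FROM_DEVICE access.
+ */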
+static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf,
+					enum qseecom_cache_ops cache_op)
+{
+	int ret = 0;
+
+	if (!dmabuf) {
+		pr_err("dmabuf is NULL\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	switch (cache_op) {
+	case QSEECOM_CACHE_CLEAN: /* Doing CLEAN and INVALIDATE */
+		dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+		dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+		break;
+	case QSEECOM_CACHE_INVALIDATE:
+		dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
+		break;
+	default:
+		pr_err("cache (%d) operation not supported\n",
+			 cache_op);
+		ret = -EINVAL;
+		goto exit;
+	}
+exit:
+	return ret;
+}
+
+static int qseecom_destroy_bridge_callback(void *dtor_data)
+{
+	int ret = 0;
+	uint64_t handle = (uint64_t)dtor_data;
+
+	pr_debug("to destroy shm bridge %lld\n", handle);
+	ret = qtee_shmbridge_deregister(handle);
+	if (ret) {
+		pr_err("failed to destroy shm bridge %lld\n", handle);
+		return ret;
+	}
+	return ret;
+}
+
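+/*
+ * Register a shmbridge for a physically contiguous secure dma-buf so TZ
+ * can access it, and install a destructor that removes the bridge when
+ * the buffer is freed.  Buffers that already have a bridge, are
+ * exclusively owned, or are not contiguous are left as-is.
+ */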
+static int qseecom_create_bridge_for_secbuf(int ion_fd, struct dma_buf *dmabuf,
+				struct sg_table *sgt)
+{
+	int ret = 0;
+	phys_addr_t phys;
+	size_t size = 0;
+	uint64_t handle = 0;
+	int tz_perm = PERM_READ|PERM_WRITE;
+	uint32_t *vmid_list;
+	uint32_t *perms_list;
+	uint32_t nelems = 0;
+	struct scatterlist *sg = sgt->sgl;
+
+	if (!qtee_shmbridge_is_enabled())
+		return 0;
+
+	phys = sg_phys(sg);
+	size = sg->length;
+
+	ret = qtee_shmbridge_query(phys);
+	if (ret) {
+		pr_debug("bridge exists\n");
+		return 0;
+	}
+
+	if (mem_buf_dma_buf_exclusive_owner(dmabuf) || (sgt->nents != 1)) {
+		pr_debug("just create bridge for contiguous secure buf\n");
+		return 0;
+	}
+
+	ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
+		(int **)&perms_list, (int *)&nelems);
+	if (ret) {
+		pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
+		return ret;
+	}
+
+	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
+				      tz_perm, &handle);
+
+	if (ret && ret != -EEXIST) {
+		pr_err("creation of shm bridge failed with ret: %d\n",
+		       ret);
+		goto exit;
+	}
+
+	pr_debug("created shm bridge %lld\n", handle);
+	mem_buf_dma_buf_set_destructor(dmabuf, qseecom_destroy_bridge_callback,
+			       (void *)handle);
+
+exit:
+	kfree(perms_list);
+	kfree(vmid_list);
+	return ret;
+}
+
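+/*
+ * Map an ion fd for device access: take a reference on the dma-buf,
+ * attach it to the qseecom device, map the attachment and, if needed,
+ * create a shmbridge for the underlying memory.
+ */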
+static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
+				struct dma_buf_attachment **attach,
+				struct dma_buf **dmabuf)
+{
+	struct dma_buf *new_dma_buf = NULL;
+	struct dma_buf_attachment *new_attach = NULL;
+	struct sg_table *new_sgt = NULL;
+	int ret = 0;
+
+	new_dma_buf = dma_buf_get(ion_fd);
+	if (IS_ERR_OR_NULL(new_dma_buf)) {
+		pr_err("dma_buf_get() for ion_fd %d failed\n", ion_fd);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	new_attach = dma_buf_attach(new_dma_buf, qseecom.dev);
+	if (IS_ERR_OR_NULL(new_attach)) {
+		pr_err("dma_buf_attach() for ion_fd %d failed\n", ion_fd);
+		ret = -ENOMEM;
+		goto err_put;
+	}
+
+	new_sgt = dma_buf_map_attachment(new_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(new_sgt)) {
+		ret = PTR_ERR(new_sgt);
+		pr_err("dma_buf_map_attachment for ion_fd %d failed ret = %d\n",
+				ion_fd, ret);
+		goto err_detach;
+	}
+
+	ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt);
+	if (ret) {
+		pr_err("failed to create bridge for fd %d\n", ion_fd);
+		goto err_unmap_attachment;
+	}
+	*sgt = new_sgt;
+	*attach = new_attach;
+	*dmabuf = new_dma_buf;
+	return ret;
+
+err_unmap_attachment:
+	dma_buf_unmap_attachment(new_attach, new_sgt, DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(new_dma_buf, new_attach);
+err_put:
+	dma_buf_put(new_dma_buf);
+err:
+	return ret;
+}
+
+static void qseecom_dmabuf_unmap(struct sg_table *sgt,
+			struct dma_buf_attachment *attach,
+			struct dma_buf *dmabuf)
+{
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+	dma_buf_detach(dmabuf, attach);
+	dma_buf_put(dmabuf);
+}
+
+/* convert ion_fd to phys_addr and virt_addr */
+static int qseecom_vaddr_map(int ion_fd,
+			phys_addr_t *paddr, void **vaddr,
+			struct sg_table **sgt,
+			struct dma_buf_attachment **attach,
+			size_t *sb_length, struct dma_buf **dmabuf)
+{
+	struct dma_buf *new_dma_buf = NULL;
+	struct dma_buf_attachment *new_attach = NULL;
+#ifdef KERNEL_VERSION_LEGACY
+	struct dma_buf_map new_dma_buf_map = {0};
+#else
+	struct iosys_map new_dma_buf_map = {0};
+#endif
+	struct sg_table *new_sgt = NULL;
+	void *new_va = NULL;
+	int ret = 0;
+
+	ret = qseecom_dmabuf_map(ion_fd, &new_sgt, &new_attach, &new_dma_buf);
+	if (ret) {
+		pr_err("qseecom_dmabuf_map for ion_fd %d failed ret = %d\n",
+				ion_fd, ret);
+		goto err;
+	}
+	ret = 0;
+
+	*paddr = sg_dma_address(new_sgt->sgl);
+	*sb_length = new_sgt->sgl->length;
+	//Invalidate the Buffer
+	dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
+	ret = dma_buf_vmap(new_dma_buf, &new_dma_buf_map);
+	new_va = ret ? NULL : new_dma_buf_map.vaddr;
+	if (!new_va) {
+		pr_err("dma_buf_vmap failed\n");
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+	*dmabuf = new_dma_buf;
+	*attach = new_attach;
+	*sgt = new_sgt;
+	*vaddr = new_va;
+	return ret;
+
+err_unmap:
+	//Flush the buffer (i.e. Clean and invalidate)
+	dma_buf_end_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
+	dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
+	qseecom_dmabuf_unmap(new_sgt, new_attach, new_dma_buf);
+	MAKE_NULL(*sgt, *attach, *dmabuf);
+err:
+	return ret;
+}
+
+static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt,
+		struct dma_buf_attachment *attach,
+		struct dma_buf *dmabuf)
+{
+#ifdef KERNEL_VERSION_LEGACY
+	struct dma_buf_map  dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr);
+#else
+	struct iosys_map  dmabufmap = IOSYS_MAP_INIT_VADDR(vaddr);
+#endif
+
+	if (!dmabuf || !vaddr || !sgt || !attach)
+		return;
+	pr_err("Trying to unmap vaddr\n");
+	dma_buf_vunmap(dmabuf, &dmabufmap);
+	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	qseecom_dmabuf_unmap(sgt, attach, dmabuf);
+}
+
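+/*
+ * Map the listener's shared buffer fd and register it with QSEE through
+ * the QSEOS_REGISTER_LISTENER command; the mapping is undone on failure.
+ */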
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_register_listener_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	ret = qseecom_vaddr_map(listener->ifd_data_fd,
+				&svc->sb_phys, (void **)&svc->sb_virt,
+				&svc->sgt, &svc->attach,
+				&svc->sb_length, &svc->dmabuf);
+	if (ret) {
+		pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
+			listener->ifd_data_fd, svc->svc.listener_id, ret);
+		return -EINVAL;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (uint32_t)svc->sb_phys;
+		cmd_buf = (void *)&req;
+		cmd_len = sizeof(struct qseecom_register_listener_ireq);
+	} else {
+		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req_64bit.listener_id = svc->svc.listener_id;
+		req_64bit.sb_len = svc->sb_length;
+		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+		cmd_buf = (void *)&req_64bit;
+		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+	}
+
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	mutex_unlock(&listener_access_lock);
+	mutex_lock(&app_access_lock);
+	__qseecom_reentrancy_check_if_no_app_blocked(
+				TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+					 &resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
+	mutex_lock(&listener_access_lock);
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Error SB registration req: resp.result = %d\n",
+			resp.result);
+		ret = -EPERM;
+		goto err;
+	}
+	return 0;
+err:
+	if (svc->dmabuf) {
+		qseecom_vaddr_unmap(svc->sb_virt, svc->sgt, svc->attach,
+			svc->dmabuf);
+		MAKE_NULL(svc->sgt, svc->attach, svc->dmabuf);
+	}
+	return ret;
+}
+
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+	struct qseecom_registered_listener_list *ptr_svc;
+
+	if (data->listener.register_pending) {
+		pr_err("A listener registration is already in progress on this FD\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	if (!access_ok((void __user *)rcvd_lstnr.virt_sb_base,
+			rcvd_lstnr.sb_size))
+		return -EFAULT;
+
+	ptr_svc = __qseecom_find_svc(data->listener.id);
+	if (ptr_svc) {
+		pr_err("A listener is already registered on this handle: lid=%d\n", data->listener.id);
+		return -EINVAL;
+	}
+
+	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
+	if (ptr_svc) {
+		if (!ptr_svc->unregister_pending) {
+			pr_err("Service %d is not unique\n",
+				rcvd_lstnr.listener_id);
+			data->released = true;
+			return -EBUSY;
+		} else {
+			/*wait until listener is unregistered*/
+			pr_debug("register %d has to wait\n",
+				rcvd_lstnr.listener_id);
+			mutex_unlock(&listener_access_lock);
+			ret = wait_event_interruptible(
+				qseecom.register_lsnr_pending_wq,
+				list_empty(
+				&qseecom.unregister_lsnr_pending_list_head));
+			if (ret) {
+				pr_err("interrupted register_pending_wq %d\n",
+						rcvd_lstnr.listener_id);
+				mutex_lock(&listener_access_lock);
+				return -ERESTARTSYS;
+			}
+			mutex_lock(&listener_access_lock);
+		}
+	}
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->sglistinfo_ptr =
+				(struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&new_entry->sglistinfo_shm.paddr,
+				&new_entry->sglistinfo_shm);
+	if (!new_entry->sglistinfo_ptr) {
+		kfree(new_entry);
+		return -ENOMEM;
+	}
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+	data->listener.register_pending = true;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
+				rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
+		__qseecom_free_tzbuf(&new_entry->sglistinfo_shm);
+		kfree_sensitive(new_entry);
+		data->listener.register_pending = false;
+		return -ENOMEM;
+	}
+	data->listener.register_pending = false;
+
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+	init_waitqueue_head(&new_entry->listener_block_app_wq);
+	new_entry->send_resp_flag = 0;
+	new_entry->listener_in_use = false;
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+
+	data->listener.id = rcvd_lstnr.listener_id;
+	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
+	return ret;
+}
+
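+/*
+ * Send QSEOS_DEREGISTER_LISTENER to QSEE, wait for outstanding ioctls on
+ * this handle to drain, then release the listener's shared buffer and
+ * remove it from the registered listener list.
+ */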
+static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_command_scm_resp resp;
+
+	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+	req.listener_id = data->listener.id;
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	mutex_unlock(&listener_access_lock);
+	mutex_lock(&app_access_lock);
+	__qseecom_reentrancy_check_if_no_app_blocked(
+				TZ_OS_DEREGISTER_LISTENER_ID);
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+					sizeof(req), &resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
+	mutex_lock(&listener_access_lock);
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+				ret, data->listener.id);
+		return ret;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+				resp.result, data->listener.id);
+		ret = -EPERM;
+		goto exit;
+	}
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_interruptible(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+		}
+	}
+
+exit:
+	if (ptr_svc->dmabuf) {
+		qseecom_vaddr_unmap(ptr_svc->sb_virt,
+			ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
+		MAKE_NULL(ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
+	}
+	__qseecom_free_tzbuf(&ptr_svc->sglistinfo_shm);
+	list_del(&ptr_svc->list);
+	kfree_sensitive(ptr_svc);
+
+	data->released = true;
+	pr_debug("Service %d is unregistered\n", data->listener.id);
+	return ret;
+}
+
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_unregister_pending_list *entry = NULL;
+
+	if (data->released) {
+		pr_err("Don't unregister lsnr %d\n", data->listener.id);
+		return -EINVAL;
+	}
+
+	ptr_svc = __qseecom_find_svc(data->listener.id);
+	if (!ptr_svc) {
+		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
+		return -ENODATA;
+	}
+	/* stop CA thread waiting for listener response */
+	ptr_svc->abort = 1;
+	wake_up_interruptible_all(&qseecom.send_resp_wq);
+
+	/* stop listener thread waiting for listener request */
+	data->abort = 1;
+	wake_up_all(&ptr_svc->rcv_req_wq);
+
+	/* return directly if pending*/
+	if (ptr_svc->unregister_pending)
+		return 0;
+
+	/*add unregistration into pending list*/
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->data = data;
+	list_add_tail(&entry->list,
+		&qseecom.unregister_lsnr_pending_list_head);
+	ptr_svc->unregister_pending = true;
+	pr_debug("unregister %d pending\n", data->listener.id);
+	return 0;
+}
+
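+/*
+ * Walk the pending-unregister list and complete deferred listener
+ * deregistration for entries whose file has already been released.
+ */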
+static void __qseecom_processing_pending_lsnr_unregister(void)
+{
+	struct qseecom_unregister_pending_list *entry = NULL;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct list_head *pos;
+	int ret = 0;
+
+	mutex_lock(&listener_access_lock);
+	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
+		pos = qseecom.unregister_lsnr_pending_list_head.next;
+		entry = list_entry(pos,
+				struct qseecom_unregister_pending_list, list);
+		if (entry && entry->data) {
+			pr_debug("process pending unregister %d\n",
+					entry->data->listener.id);
+			/* don't process the entry if qseecom_release is not called*/
+			if (!entry->data->listener.release_called) {
+				list_del(pos);
+				list_add_tail(&entry->list,
+					&qseecom.unregister_lsnr_pending_list_head);
+				break;
+			}
+			ptr_svc = __qseecom_find_svc(
+						entry->data->listener.id);
+			if (ptr_svc) {
+				ret = __qseecom_unregister_listener(
+						entry->data, ptr_svc);
+				if (ret) {
+					pr_debug("unregister %d pending again\n",
+						entry->data->listener.id);
+					mutex_unlock(&listener_access_lock);
+					return;
+				}
+			} else
+				pr_err("invalid listener %d\n",
+					entry->data->listener.id);
+			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
+			kfree_sensitive(entry->data);
+		}
+		list_del(pos);
+		kfree_sensitive(entry);
+	}
+	mutex_unlock(&listener_access_lock);
+	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
+}
+
+static void __wakeup_unregister_listener_kthread(void)
+{
+	atomic_set(&qseecom.unregister_lsnr_kthread_state,
+				LSNR_UNREG_KT_WAKEUP);
+	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
+}
+
+static int __qseecom_unregister_listener_kthread_func(void *data)
+{
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			qseecom.unregister_lsnr_kthread_wq,
+			atomic_read(&qseecom.unregister_lsnr_kthread_state)
+				== LSNR_UNREG_KT_WAKEUP);
+		pr_debug("kthread to unregister listener is called %d\n",
+			atomic_read(&qseecom.unregister_lsnr_kthread_state));
+		__qseecom_processing_pending_lsnr_unregister();
+		atomic_set(&qseecom.unregister_lsnr_kthread_state,
+				LSNR_UNREG_KT_SLEEP);
+	}
+	pr_warn("kthread to unregister listener stopped\n");
+	return 0;
+}
+
+static int qseecom_bus_scale_update_request(
+					int client, int mode)
+{
+	pr_debug("client %d, mode %d\n", client, mode);
+	/*TODO: get ab/ib from device tree for different mode*/
+	if (!mode)
+		return icc_set_bw(qseecom.icc_path, 0, 0);
+	else
+		return icc_set_bw(qseecom.icc_path,
+			qseecom.avg_bw, qseecom.peak_bw);
+}
+
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qclk->ce_core_src_clk != NULL) {
+		if (mode == INACTIVE) {
+			__qseecom_disable_clk(CLK_QSEE);
+		} else {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				pr_err("CLK enabling failed (%d) MODE (%d)\n",
+							ret, mode);
+		}
+	}
+
+	if ((!ret) && (qseecom.current_mode != mode)) {
+		ret = qseecom_bus_scale_update_request(
+					qseecom.qsee_perf_client, mode);
+		if (ret) {
+			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+							ret, mode);
+			if (qclk->ce_core_src_clk != NULL) {
+				if (mode == INACTIVE) {
+					ret = __qseecom_enable_clk(CLK_QSEE);
+					if (ret)
+						pr_err("CLK enable failed\n");
+				} else
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+		}
+		qseecom.current_mode = mode;
+	}
+	return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+	mutex_lock(&app_access_lock);
+	mutex_lock(&qsee_bw_mutex);
+	if (qseecom.timer_running)
+		__qseecom_set_msm_bus_request(INACTIVE);
+	pr_debug("current_mode = %d, cumulative_mode = %d\n",
+				qseecom.current_mode, qseecom.cumulative_mode);
+	qseecom.timer_running = false;
+	mutex_unlock(&qsee_bw_mutex);
+	mutex_unlock(&app_access_lock);
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(struct timer_list *data)
+{
+	schedule_work(&qseecom.bw_inactive_req_ws);
+}
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+	int ret = 0;
+
+	mutex_lock(&clk_access_lock);
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt--;
+	} else {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	return ret;
+}
+
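+/*
+ * Vote the requested bus bandwidth mode (or the cumulative mode when
+ * mode is 0) and cancel any pending bandwidth scale-down timer.
+ */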
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+	int32_t ret = 0;
+	int32_t request_mode = INACTIVE;
+
+	mutex_lock(&qsee_bw_mutex);
+	if (mode == 0) {
+		if (qseecom.cumulative_mode > MEDIUM)
+			request_mode = HIGH;
+		else
+			request_mode = qseecom.cumulative_mode;
+	} else {
+		request_mode = mode;
+	}
+
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
+	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
+	mutex_unlock(&qsee_bw_mutex);
+	return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+					struct qseecom_dev_handle *data)
+{
+	qseecom.cumulative_mode -= data->mode;
+	data->mode = INACTIVE;
+
+	return 0;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+			struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+	if (data->mode == INACTIVE) {
+		qseecom.cumulative_mode += request_mode;
+		data->mode = request_mode;
+	} else {
+		if (data->mode != request_mode) {
+			qseecom.cumulative_mode -= data->mode;
+			qseecom.cumulative_mode += request_mode;
+			data->mode = request_mode;
+		}
+	}
+	return 0;
+}
+
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	ret = qsee_vote_for_clock(data, CLK_DFAB);
+	if (ret) {
+		pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+	ret = qsee_vote_for_clock(data, CLK_SFPB);
+	if (ret) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+
+perf_enable_exit:
+	return ret;
+}
+
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	if (qseecom.no_clock_support)
+		return;
+
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+	if (!qseecom.support_bus_scaling)
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
+	} else {
+		ret = qsee_vote_for_clock(data, CLK_SFPB);
+		if (ret)
+			pr_err("Fail vote for clk SFPB ret %d\n", ret);
+	}
+	return ret;
+}
+
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret;
+	struct qseecom_set_sb_mem_param_req req;
+	size_t len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+					(req.sb_len == 0)) {
+		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d)\n",
+			req.ifd_data_fd, req.sb_len);
+		return -EFAULT;
+	}
+	if (!access_ok((void __user *)req.virt_sb_base,
+			req.sb_len))
+		return -EFAULT;
+
+	ret = qseecom_vaddr_map(req.ifd_data_fd, &data->client.sb_phys,
+				(void **)&data->client.sb_virt,
+				 &data->client.sgt, &data->client.attach,
+				&len, &data->client.dmabuf);
+	if (ret) {
+		pr_err("failed to convert ion_fd %d for client app %d with err: %d\n",
+			req.ifd_data_fd, data->client.app_id, ret);
+		return -EINVAL;
+	}
+
+	if (len < req.sb_len) {
+		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
+			req.sb_len, len);
+		ret = -EINVAL;
+		goto exit;
+	}
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+
+	return ret;
+exit:
+	if (data->client.dmabuf) {
+		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
+			 data->client.attach, data->client.dmabuf);
+		MAKE_NULL(data->client.sgt,
+			data->client.attach, data->client.dmabuf);
+	}
+	return ret;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort || ptr_svc->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+			struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (ptr_svc->send_resp_flag != 0);
+	return ret || data->abort || ptr_svc->abort;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	if (ptr_svc->sglist_cnt) {
+		memset(ptr_svc->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		ptr_svc->sglist_cnt = 0;
+	}
+}
+
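+/*
+ * Handle QSEOS_RESULT_INCOMPLETE: wake up the listener named in the TZ
+ * response, wait for its reply, send the listener response back to QSEE,
+ * and loop until the original command completes.
+ */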
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
+									= {0};
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	qseecom.app_block_ref_cnt++;
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		mutex_lock(&listener_access_lock);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				ret = qseecom_dmabuf_cache_operations(
+					ptr_svc->dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+				if (ret) {
+					rc = -EINVAL;
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (!ptr_svc->dmabuf) {
+			pr_err("Client dmabuf is not initialized\n");
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_err("Service %d does not exist\n",
+						lstnr);
+			rc = -ERESTARTSYS;
+			ptr_svc = NULL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->abort == 1) {
+			pr_debug("Service %d abort %d\n",
+						lstnr, ptr_svc->abort);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+		/* block all signals */
+		mutex_unlock(&listener_access_lock);
+		do {
+			/*
+			 * When reentrancy is not supported, check global
+			 * send_resp_flag; otherwise, check this listener's
+			 * send_resp_flag.
+			 */
+			if (!qseecom.qsee_reentrancy_support &&
+				!wait_event_interruptible(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+
+			if (qseecom.qsee_reentrancy_support &&
+				!wait_event_interruptible(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		mutex_lock(&listener_access_lock);
+		/* restore signal mask */
+		if (data->abort || ptr_svc->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+		} else {
+			status = QSEOS_RESULT_SUCCESS;
+		}
+err_resp:
+		qseecom.send_resp_flag = 0;
+		if (ptr_svc) {
+			ptr_svc->send_resp_flag = 0;
+			table = ptr_svc->sglistinfo_ptr;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			if (table) {
+				send_data_rsp.sglistinfo_ptr =
+					(uint32_t)virt_to_phys(table);
+				send_data_rsp.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				qtee_shmbridge_flush_shm_buf(
+						&ptr_svc->sglistinfo_shm);
+			}
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			if (table) {
+				send_data_rsp_64bit.sglistinfo_ptr =
+					virt_to_phys(table);
+				send_data_rsp_64bit.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				qtee_shmbridge_flush_shm_buf(
+						&ptr_svc->sglistinfo_shm);
+			}
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support || table == NULL)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				goto exit;
+		}
+
+		if (ptr_svc) {
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+							QSEECOM_CACHE_CLEAN);
+			if (ret)
+				goto exit;
+
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			ptr_svc->listener_in_use = false;
+			__qseecom_clean_listener_sglistinfo(ptr_svc);
+
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+
+		} else {
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+		}
+
+		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
+			status, resp->result, data->client.app_id, lstnr);
+		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+		}
+exit:
+		mutex_unlock(&listener_access_lock);
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	qseecom.app_block_ref_cnt--;
+	wake_up_interruptible_all(&qseecom.app_block_wq);
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
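+/*
+ * A TZ app is blocked on a busy listener: sleep until the listener is
+ * free, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so QSEE resumes the
+ * blocked session, repeating while TZ still reports BLOCKED_ON_LISTENER.
+ */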
+static int __qseecom_process_reentrancy_blocked_on_listener(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	unsigned int session_id;
+	sigset_t new_sigset;
+	unsigned long flags;
+	bool found_app = false;
+	struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
+
+	if (!resp || !data) {
+		pr_err("invalid resp or data pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* find app_id & img_name from list */
+	if (!ptr_app) {
+		if (data->client.from_smcinvoke || data->client.from_loadapp) {
+			pr_debug("This request is from %s\n",
+				(data->client.from_smcinvoke ? "smcinvoke" : "load_app"));
+			ptr_app = &dummy_app_entry;
+			ptr_app->app_id = data->client.app_id;
+		} else {
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+						flags);
+			list_for_each_entry(ptr_app,
+				&qseecom.registered_app_list_head, list) {
+				if ((ptr_app->app_id == data->client.app_id) &&
+					(!strcmp(ptr_app->app_name,
+						data->client.app_name))) {
+					found_app = true;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			if (!found_app) {
+				pr_err("app_id %d (%s) is not found\n",
+					data->client.app_id,
+					(char *)data->client.app_name);
+				ret = -ENOENT;
+				goto exit;
+			}
+		}
+	}
+
+	do {
+		session_id = resp->resp_type;
+		mutex_lock(&listener_access_lock);
+		list_ptr = __qseecom_find_svc(resp->data);
+		if (!list_ptr) {
+			pr_err("Invalid listener ID %d\n", resp->data);
+			ret = -ENODATA;
+			mutex_unlock(&listener_access_lock);
+			goto exit;
+		}
+		ptr_app->blocked_on_listener_id = resp->data;
+
+		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
+			resp->data, list_ptr->listener_in_use,
+			session_id, data->client.app_id);
+
+		/* sleep until listener is available */
+		sigfillset(&new_sigset);
+		do {
+			qseecom.app_block_ref_cnt++;
+			ptr_app->app_blocked = true;
+			mutex_unlock(&listener_access_lock);
+			mutex_unlock(&app_access_lock);
+			wait_event_interruptible(
+				list_ptr->listener_block_app_wq,
+				!list_ptr->listener_in_use);
+			mutex_lock(&app_access_lock);
+			mutex_lock(&listener_access_lock);
+			ptr_app->app_blocked = false;
+			qseecom.app_block_ref_cnt--;
+		}  while (list_ptr->listener_in_use);
+		ptr_app->blocked_on_listener_id = 0;
+		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
+			resp->data, session_id, data->client.app_id);
+
+		/* notify TZ that listener is available */
+		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+
+		if (qseecom.smcinvoke_support)
+			ireq.app_or_session_id = session_id;
+		else
+			ireq.app_or_session_id = data->client.app_id;
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					&ireq, sizeof(ireq),
+					&continue_resp, sizeof(continue_resp));
+
+		if (ret && qseecom.smcinvoke_support) {
+			/* retry with legacy cmd */
+			pr_warn("falling back to legacy method\n");
+			qseecom.smcinvoke_support = false;
+			ireq.app_or_session_id = data->client.app_id;
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				&ireq, sizeof(ireq),
+				&continue_resp, sizeof(continue_resp));
+			qseecom.smcinvoke_support = true;
+			if (ret) {
+				pr_err("unblock app %d or session %d fail\n",
+					data->client.app_id, session_id);
+				mutex_unlock(&listener_access_lock);
+				goto exit;
+			}
+		}
+		mutex_unlock(&listener_access_lock);
+		resp->result = continue_resp.result;
+		resp->resp_type = continue_resp.resp_type;
+		resp->data = continue_resp.data;
+		pr_err("unblock resp = %d\n", resp->result);
+	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
+
+	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
+		pr_err("Unexpected unblock resp %d\n", resp->result);
+		ret = -EINVAL;
+	}
+exit:
+	return ret;
+}
+
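+/*
+ * Reentrancy-aware variant of __qseecom_process_incomplete_cmd():
+ * app_access_lock is dropped while waiting for the listener response so
+ * other TZ apps can make progress in the meantime.
+ */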
+static int __qseecom_reentrancy_process_incomplete_cmd(
+					struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
+									= {0};
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		mutex_lock(&listener_access_lock);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				ret = qseecom_dmabuf_cache_operations(
+					ptr_svc->dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+				if (ret) {
+					rc = -EINVAL;
+					status = QSEOS_RESULT_FAILURE;
+					goto err_resp;
+				}
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (!ptr_svc->dmabuf) {
+			pr_err("Client dmabuf is not initialized\n");
+			rc = -EINVAL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_err("Service %d does not exist\n",
+						lstnr);
+			rc = -ERESTARTSYS;
+			ptr_svc = NULL;
+			table = NULL;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		if (ptr_svc->abort == 1) {
+			pr_debug("Service %d abort %d\n",
+						lstnr, ptr_svc->abort);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+			goto err_resp;
+		}
+
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals*/
+		sigfillset(&new_sigset);
+
+		/* block all signals */
+		/* unlock mutex btw waking listener and sleep-wait */
+		mutex_unlock(&listener_access_lock);
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_interruptible(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		/* lock mutex again after resp sent */
+		mutex_lock(&app_access_lock);
+		mutex_lock(&listener_access_lock);
+		ptr_svc->send_resp_flag = 0;
+		qseecom.send_resp_flag = 0;
+
+		/* restore signal mask */
+		if (data->abort || ptr_svc->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status  = QSEOS_RESULT_FAILURE;
+		} else {
+			status  = QSEOS_RESULT_SUCCESS;
+		}
+err_resp:
+		if (ptr_svc)
+			table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			if (table) {
+				send_data_rsp.sglistinfo_ptr =
+					(uint32_t)virt_to_phys(table);
+				send_data_rsp.sglistinfo_len =
+						SGLISTINFO_TABLE_SIZE;
+				qtee_shmbridge_flush_shm_buf(
+						&ptr_svc->sglistinfo_shm);
+			}
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			if (table) {
+				send_data_rsp_64bit.sglistinfo_ptr =
+					virt_to_phys(table);
+				send_data_rsp_64bit.sglistinfo_len =
+					SGLISTINFO_TABLE_SIZE;
+				qtee_shmbridge_flush_shm_buf(
+						&ptr_svc->sglistinfo_shm);
+			}
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (!qseecom.whitelist_support || table == NULL)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+
+		if (lstnr == RPMB_SERVICE) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				goto exit;
+		}
+
+		if (ptr_svc) {
+			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
+						QSEECOM_CACHE_CLEAN);
+			if (ret)
+				goto exit;
+
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			ptr_svc->listener_in_use = false;
+			__qseecom_clean_listener_sglistinfo(ptr_svc);
+			wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+		} else {
+			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+			if (ret) {
+				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+				goto exit;
+			}
+		}
+
+		switch (resp->result) {
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+					lstnr, data->client.app_id, resp->data);
+			if (lstnr == resp->data) {
+				pr_err("lstnr %d should not be blocked!\n",
+					lstnr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			mutex_unlock(&listener_access_lock);
+			ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, NULL, data);
+			mutex_lock(&listener_access_lock);
+			if (ret) {
+				pr_err("failed to process App(%d) %s blocked on listener %d\n",
+					data->client.app_id,
+					data->client.app_name, resp->data);
+				goto exit;
+			}
+			fallthrough;
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_INCOMPLETE:
+			break;
+		case QSEOS_RESULT_CBACK_REQUEST:
+			pr_warn("get cback req app_id = %d, resp->data = %d\n",
+				data->client.app_id, resp->data);
+			resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED;
+			break;
+		default:
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+			goto exit;
+		}
+exit:
+		mutex_unlock(&listener_access_lock);
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+/*
+ * QSEE does not support reentrancy of OS-level commands until RE phase-3,
+ * and OS-level scm_call commands will fail if any TZ app is blocked.
+ * So, before sending an OS-level scm call, first check whether any app is
+ * blocked and, if so, wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+		/* thread sleep until this app unblocked */
+		while (qseecom.app_block_ref_cnt > 0) {
+			mutex_unlock(&app_access_lock);
+			wait_event_interruptible(qseecom.app_block_wq,
+				(!qseecom.app_block_ref_cnt));
+			mutex_lock(&app_access_lock);
+		}
+	}
+}
+
+/*
+ * The send-data scm_call will fail if this TA is blocked or if more than
+ * one TA is requesting listener services; so first check whether we need
+ * to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+			struct qseecom_registered_app_list *ptr_app)
+{
+	if (qseecom.qsee_reentrancy_support) {
+		ptr_app->check_block++;
+		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+			/* thread sleep until this app unblocked */
+			mutex_unlock(&app_access_lock);
+			wait_event_interruptible(qseecom.app_block_wq,
+				(!ptr_app->app_blocked &&
+				qseecom.app_block_ref_cnt <= 1));
+			mutex_lock(&app_access_lock);
+		}
+		ptr_app->check_block--;
+	}
+}
+
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+					uint32_t *app_id)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+	bool found_app = false;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+
+	if (!app_id) {
+		pr_err("Null pointer to app_id\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+
+	/* check if app exists and has been registered locally */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_app_list_head, list) {
+		if (!strcmp(entry->app_name, req.app_name)) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (found_app) {
+		pr_debug("Found app with id %d\n", entry->app_id);
+		*app_id = entry->app_id;
+		return 0;
+	}
+
+	memset((void *)&resp, 0, sizeof(resp));
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+
+	switch (resp.resp_type) {
+	/* qsee returned a listener-type response */
+	case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app\n");
+		return -EINVAL;
+	case QSEOS_APP_ID:
+		*app_id = resp.data;
+		return 0;
+	default:
+		pr_err("invalid resp type (%d) from qsee\n",
+				resp.resp_type);
+		return -ENODEV;
+	}
+}
+
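+/*
+ * Handle the LOAD_APP ioctl: load cmnlib/cmnlib64 if needed, look the app up
+ * by name, and either bump the ref_cnt of an already-loaded app or map the
+ * caller's image buffer, send QSEOS_APP_START_COMMAND to TZ, and add the new
+ * app to the registered app list.
+ */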
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret = 0;
+	phys_addr_t pa = 0;
+	void *vaddr = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct dma_buf *dmabuf = NULL;
+	struct sg_table *sgt = NULL;
+
+	size_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	bool first_time = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!(qseecom.commonlib_loaded ||
+				qseecom.commonlib_loaded_by_hostvm) &&
+				load_img_req.app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!(qseecom.commonlib64_loaded ||
+				qseecom.commonlib_loaded_by_hostvm) &&
+				load_img_req.app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret)
+			return ret;
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret)
+		goto enable_clk_err;
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret < 0)
+		goto checkapp_err;
+
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+		&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+						app_id, req.app_name);
+					ret = -EINVAL;
+					goto loadapp_err;
+				}
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+			&qseecom.registered_app_list_lock, flags);
+		ret = 0;
+	} else {
+		first_time = true;
+		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
+			(char *)(load_img_req.img_name));
+
+		ret = qseecom_vaddr_map(load_img_req.ifd_data_fd,
+				&pa, &vaddr, &sgt, &attach, &len, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve the handle\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+
+		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+					len, load_img_req.mdt_len,
+					load_img_req.img_len);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		/* Populate the structure for sending scm call to load image */
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req.mdt_len = load_img_req.mdt_len;
+			load_req.img_len = load_img_req.img_len;
+			strlcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req.phy_addr = (uint32_t)pa;
+			cmd_buf = (void *)&load_req;
+			cmd_len = sizeof(struct qseecom_load_app_ireq);
+		} else {
+			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req_64bit.mdt_len = load_img_req.mdt_len;
+			load_req_64bit.img_len = load_img_req.img_len;
+			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req_64bit.phy_addr = (uint64_t)pa;
+			cmd_buf = (void *)&load_req_64bit;
+			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+		}
+
+		ret = qseecom_dmabuf_cache_operations(dmabuf,
+						QSEECOM_CACHE_CLEAN);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+			cmd_len, &resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		ret = qseecom_dmabuf_cache_operations(dmabuf,
+						QSEECOM_CACHE_INVALIDATE);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		do {
+			if (resp.result == QSEOS_RESULT_FAILURE) {
+				pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+				ret = -EFAULT;
+				goto loadapp_err;
+			}
+
+			if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+				ret = __qseecom_process_incomplete_cmd(data, &resp);
+				if (ret) {
+					/* TZ has created app_id, need to unload it */
+					pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+						ret, resp.result, resp.data,
+						load_img_req.img_name);
+					__qseecom_unload_app(data, resp.data);
+					ret = -EFAULT;
+					goto loadapp_err;
+				}
+			}
+
+			if (resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
+				pr_err("load app blocked on listener\n");
+				data->client.app_id = resp.result;
+				data->client.from_loadapp = true;
+				ret = __qseecom_process_reentrancy_blocked_on_listener(&resp,
+					NULL, data);
+				if (ret) {
+					pr_err("load app fail proc block on listener,ret :%d\n",
+						ret);
+					ret = -EFAULT;
+					goto loadapp_err;
+				}
+			}
+
+		} while ((resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) ||
+			(resp.result == QSEOS_RESULT_INCOMPLETE));
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+				resp.result);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		entry->app_arch = load_img_req.app_arch;
+		/*
+		 * The keymaster app may first be loaded as "keymaste" by
+		 * qseecomd and then used as "keymaster" on some targets. To
+		 * avoid app-name check failures, register it as "keymaster"
+		 * in the app_list and in the thread private data.
+		 */
+		if (!strcmp(load_img_req.img_name, "keymaste"))
+			strlcpy(entry->app_name, "keymaster",
+					MAX_APP_NAME_SIZE);
+		else
+			strlcpy(entry->app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		entry->check_block = 0;
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+
+		pr_warn("App with id %u (%s) now loaded\n", app_id,
+		(char *)(load_img_req.img_name));
+	}
+	data->client.app_id = app_id;
+	data->client.app_arch = load_img_req.app_arch;
+	if (!strcmp(load_img_req.img_name, "keymaste"))
+		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+	else
+		strlcpy(data->client.app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+		if (first_time) {
+			spin_lock_irqsave(
+				&qseecom.registered_app_list_lock, flags);
+			list_del(&entry->list);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			kfree_sensitive(entry);
+		}
+	}
+
+loadapp_err:
+	if (dmabuf) {
+		qseecom_vaddr_unmap(vaddr, sgt, attach, dmabuf);
+		MAKE_NULL(sgt, attach, dmabuf);
+	}
+checkapp_err:
+	__qseecom_disable_clk_scale_down(data);
+enable_clk_err:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+	return ret;
+}
+
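+/*
+ * Wait for all other ioctls on this handle to drain (ioctl_count <= 1)
+ * before the app is unloaded; app_access_lock is dropped while waiting when
+ * reentrancy is supported.
+ */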
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 0;	/* Set unload app */
+
+	wake_up_all(&qseecom.send_resp_wq);
+	if (qseecom.qsee_reentrancy_support)
+		mutex_unlock(&app_access_lock);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_interruptible(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	if (qseecom.qsee_reentrancy_support)
+		mutex_lock(&app_access_lock);
+	return ret;
+}
+
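+/*
+ * Send QSEOS_APP_SHUTDOWN_COMMAND for the given app_id and keep handling
+ * INCOMPLETE/BLOCKED_ON_LISTENER responses until TZ reports a final result.
+ */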
+static int __qseecom_unload_app(struct qseecom_dev_handle *data,
+				uint32_t app_id)
+{
+	struct qseecom_unload_app_ireq req;
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	/* Populate the structure for sending scm call to load image */
+	req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+	req.app_id = app_id;
+	/* SCM_CALL to unload the app */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload app (id = %d) failed ret: %d\n",
+			app_id, ret);
+		return ret;
+	}
+
+	do {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			pr_warn("App (%d) is unloaded\n", app_id);
+			break;
+		case QSEOS_RESULT_INCOMPLETE:
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret)
+				pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
+					app_id, ret, resp.result, resp.data);
+			else
+				pr_warn("App (%d) is unloaded\n", app_id);
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("app (%d) unload_failed!!\n", app_id);
+			ret = -EFAULT;
+			break;
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_err("unload app (%d) blocked on listener\n", app_id);
+			ret = __qseecom_process_reentrancy_blocked_on_listener(&resp, NULL, data);
+			if (ret) {
+				pr_err("unload app fail proc block on listener cmd,ret :%d\n",
+					ret);
+				ret = -EFAULT;
+			}
+			break;
+		default:
+			pr_err("unload app %d get unknown resp.result %d\n",
+					app_id, resp.result);
+			ret = -EFAULT;
+			break;
+		}
+	} while ((resp.result == QSEOS_RESULT_INCOMPLETE) ||
+			(resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER));
+	return ret;
+}
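+
+/*
+ * Unload the app bound to this handle: drain outstanding ioctls, drop the
+ * app's ref_cnt, and send the shutdown command to TZ when the last reference
+ * goes away; the keymaster app is never unloaded from TZ.
+ */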
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+				bool app_crash)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qseecom_registered_app_list *ptr_app = NULL;
+	bool found_app = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id,
+			data->client.app_name, app_crash);
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_debug("Do not unload keymaster app from tz\n");
+		goto unload_exit;
+	}
+
+	ret = __qseecom_cleanup_app(data);
+	if (ret && !app_crash) {
+		pr_err("cleanup app failed, pending ioctl:%d\n", data->ioctl_count.counter);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+	/* Ignore app_id 0; it happens when the qseecom fd is closed after a failed app load */
+	if (!data->client.app_id)
+		goto unload_exit;
+
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+								list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			(!strcmp(ptr_app->app_name, data->client.app_name))) {
+			pr_debug("unload app %d (%s), ref_cnt %d\n",
+				ptr_app->app_id, ptr_app->app_name,
+				ptr_app->ref_cnt);
+			ptr_app->ref_cnt--;
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+							flags);
+	if (!found_app) {
+		pr_err("Cannot find app with id = %d (%s)\n",
+			data->client.app_id, data->client.app_name);
+		ret = -EINVAL;
+		goto unload_exit;
+	}
+
+	if (!ptr_app->ref_cnt) {
+		ret = __qseecom_unload_app(data, data->client.app_id);
+		if (ret == -EBUSY) {
+			/*
+			 * If the unload failed with -EBUSY, don't free the entry;
+			 * just restore the app ref_cnt and return -EBUSY.
+			 */
+			pr_warn("unload ta %d(%s) EBUSY\n",
+				data->client.app_id, data->client.app_name);
+			ptr_app->ref_cnt++;
+			return ret;
+		}
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_del(&ptr_app->list);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+		kfree_sensitive(ptr_app);
+	}
+
+unload_exit:
+	if (data->client.dmabuf) {
+		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
+			data->client.attach, data->client.dmabuf);
+		MAKE_NULL(data->client.sgt,
+			data->client.attach, data->client.dmabuf);
+	}
+	data->released = true;
+	return ret;
+}
+
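+/*
+ * Queue this handle on the pending-unload list so the unload kthread can
+ * unload the app later; no-op if an unload is already pending.
+ */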
+static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+
+	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
+		data->client.app_id, data->client.app_name,
+		data->client.unload_pending);
+	if (data->client.unload_pending)
+		return 0;
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->data = data;
+	list_add_tail(&entry->list,
+		&qseecom.unload_app_pending_list_head);
+	data->client.unload_pending = true;
+	pr_debug("unload ta %d pending\n", data->client.app_id);
+	return 0;
+}
+
+static void __wakeup_unload_app_kthread(void)
+{
+	atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_WAKEUP);
+	wake_up_interruptible(&qseecom.unload_app_kthread_wq);
+}
+
+static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	bool found = false;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
+					list) {
+		if ((entry->data->client.app_id == app_id) &&
+			(!strcmp(entry->data->client.app_name, app_name))) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+	return found;
+}
+
+static void __qseecom_processing_pending_unload_app(void)
+{
+	struct qseecom_unload_app_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
+
+	mutex_lock(&unload_app_pending_list_lock);
+	while (!list_empty(&qseecom.unload_app_pending_list_head)) {
+		pos = qseecom.unload_app_pending_list_head.next;
+		entry = list_entry(pos,
+			struct qseecom_unload_app_pending_list, list);
+		if (entry && entry->data) {
+			pr_debug("process pending unload app %d (%s)\n",
+				entry->data->client.app_id,
+				entry->data->client.app_name);
+			mutex_unlock(&unload_app_pending_list_lock);
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(entry->data, true);
+			if (ret)
+				pr_err("unload app %d pending failed %d\n",
+					entry->data->client.app_id, ret);
+			mutex_unlock(&app_access_lock);
+			mutex_lock(&unload_app_pending_list_lock);
+			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
+			kfree_sensitive(entry->data);
+		}
+		list_del(pos);
+		kfree_sensitive(entry);
+	}
+	mutex_unlock(&unload_app_pending_list_lock);
+}
+
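+/*
+ * Kthread that sleeps until woken via __wakeup_unload_app_kthread() and then
+ * processes the pending-unload list.
+ */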
+static int __qseecom_unload_app_kthread_func(void *data)
+{
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			qseecom.unload_app_kthread_wq,
+			atomic_read(&qseecom.unload_app_kthread_state)
+				== UNLOAD_APP_KT_WAKEUP);
+		pr_debug("kthread to unload app is called, state %d\n",
+			atomic_read(&qseecom.unload_app_kthread_state));
+		__qseecom_processing_pending_unload_app();
+		atomic_set(&qseecom.unload_app_kthread_state,
+				UNLOAD_APP_KT_SLEEP);
+	}
+	pr_warn("kthread to unload app stopped\n");
+	return 0;
+}
+
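+/*
+ * Translate a user-space virtual address within the registered shared buffer
+ * into the corresponding physical (kphys) or kernel virtual (kvirt) address.
+ */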
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return (uintptr_t)data->client.sb_virt +
+				(virt - data->client.user_virt_sb_base);
+}
+
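+/*
+ * Build the RPMB service ireq from the client request. The request buffer
+ * must sit at the base of the shared buffer so the key type can be read from
+ * it; the response pointer is translated to a physical address.
+ */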
+static int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	void *req_buf = NULL;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uintptr_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (data_ptr->client.sb_length <
+			sizeof(struct qseecom_rpmb_provision_key)) {
+		pr_err("shared buffer is too small to hold key type\n");
+		return -EINVAL;
+	}
+	req_buf = data_ptr->client.sb_virt;
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->key_type =
+		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	return ret;
+}
+
+static int __qseecom_process_fsm_key_svc_cmd(
+		struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_fsm_diag_req *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+	if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
+		pr_err("Required: %u, Available: %zu\n",
+				reqd_len_sb_in, data_ptr->client.sb_length);
+		return -ENOMEM;
+	}
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+
+	return ret;
+}
+
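+/*
+ * Validate a send-service-cmd request: both buffers must lie entirely within
+ * the registered shared buffer and none of the pointer/length arithmetic may
+ * overflow.
+ */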
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_svc_cmd_req *req)
+{
+	if (!req || !req->resp_buf || !req->cmd_req_buf) {
+		pr_err("req or cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_virt == NULL) {
+		pr_err("sb_virt null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
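+/*
+ * Handle the SEND_SVC_CMD ioctl (RPMB and FSM/fuse service commands): build
+ * the ireq, vote for bandwidth/clocks, issue the scm call, and post-process
+ * the response, including INCOMPLETE handling.
+ */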
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_client_send_service_ireq send_svc_ireq;
+	struct qseecom_client_send_fsm_diag_req send_fsm_diag_svc_ireq;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_send_svc_cmd_req req;
+	void   *send_req_ptr;
+	size_t req_buf_size;
+
+	if (copy_from_user(&req,
+				(void __user *)argp,
+				sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_service_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	data->type = QSEECOM_SECURE_SERVICE;
+
+	switch (req.cmd_id) {
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+		send_req_ptr = &send_svc_ireq;
+		req_buf_size = sizeof(send_svc_ireq);
+		if (__qseecom_process_rpmb_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	case QSEOS_FSM_LTEOTA_REQ_CMD:
+	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+	case QSEOS_FSM_IKE_REQ_CMD:
+	case QSEOS_FSM_IKE_REQ_RSP_CMD:
+	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+	case QSEOS_FSM_OEM_FUSE_READ_ROW:
+	case QSEOS_FSM_ENCFS_REQ_CMD:
+	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+	case QSEOS_DIAG_FUSE_REQ_CMD:
+	case QSEOS_DIAG_FUSE_REQ_RSP_CMD:
+
+		send_req_ptr = &send_fsm_diag_svc_ireq;
+		req_buf_size = sizeof(send_fsm_diag_svc_ireq);
+		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+		return -EINVAL;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+		if (ret) {
+			pr_err("Fail to set bw HIGH\n");
+			return ret;
+		}
+	} else {
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clocks with err %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				(const void *)send_req_ptr,
+				req_buf_size, &resp, sizeof(resp));
+
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		goto exit;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_debug("qseos_result_incomplete\n");
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd fail with result: %d\n",
+				resp.result);
+		}
+		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+			pr_warn("RPMB key status is 0x%x\n", resp.result);
+			if (put_user(resp.result,
+				(uint32_t __user *)req.resp_buf)) {
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = 0;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with resp.result: %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	default:
+		pr_err("Response result %d not supported\n",
+				resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit:
+	if (!qseecom.support_bus_scaling) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	} else {
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	}
+	return ret;
+}
+
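+/*
+ * Validate a send-cmd request against the registered shared buffer; same
+ * bounds and overflow checks as the service-cmd variant.
+ */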
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+
+{
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+						(req->cmd_req_buf == NULL)) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
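+/*
+ * Reentrancy post-processing of a send-cmd response: unblock the app if it
+ * is blocked on a listener, then drain INCOMPLETE responses while tracking
+ * app_block_ref_cnt and waking waiters on app_block_wq.
+ */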
+static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	switch (resp->result) {
+	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+		pr_warn("App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name,
+			resp->data);
+		ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, ptr_app, data);
+		if (ret) {
+			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name, resp->data);
+			return ret;
+		}
+		fallthrough;
+		/* fall through to process incomplete request */
+	case QSEOS_RESULT_INCOMPLETE:
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		wake_up_interruptible_all(&qseecom.app_block_wq);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+		return ret;
+	case QSEOS_RESULT_SUCCESS:
+		return ret;
+	default:
+		pr_err("Response result %d not supported\n",
+						resp->result);
+		return -EINVAL;
+	}
+	return ret;
+}
+
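+/*
+ * Core send-cmd path: look up the app, build the 32-bit or 64-bit send-data
+ * ireq (translating shared-buffer user addresses unless physical addresses
+ * were supplied), issue the scm call, and run the reentrancy or
+ * incomplete-command handling on the response.
+ */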
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+			struct qseecom_send_cmd_req *req,
+			bool is_phys_adr)
+{
+	int ret = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req = {0};
+	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		send_data_req.app_id = data->client.app_id;
+
+		if (!is_phys_adr) {
+			send_data_req.req_ptr =
+				(uint32_t)(__qseecom_uvirt_to_kphys
+				(data, (uintptr_t)req->cmd_req_buf));
+			send_data_req.rsp_ptr =
+				(uint32_t)(__qseecom_uvirt_to_kphys(
+				data, (uintptr_t)req->resp_buf));
+		} else {
+			send_data_req.req_ptr = (uint32_t)(uintptr_t)req->cmd_req_buf;
+			send_data_req.rsp_ptr = (uint32_t)(uintptr_t)req->resp_buf;
+		}
+
+		send_data_req.req_len = req->cmd_req_len;
+		send_data_req.rsp_len = req->resp_len;
+		send_data_req.sglistinfo_ptr =
+				(uint32_t)data->sglistinfo_shm.paddr;
+		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+		cmd_buf = (void *)&send_data_req;
+		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+	} else {
+		send_data_req_64bit.app_id = data->client.app_id;
+
+		if (!is_phys_adr) {
+			send_data_req_64bit.req_ptr =
+				 __qseecom_uvirt_to_kphys(data,
+				(uintptr_t)req->cmd_req_buf);
+			send_data_req_64bit.rsp_ptr =
+				__qseecom_uvirt_to_kphys(data,
+				(uintptr_t)req->resp_buf);
+		} else {
+			send_data_req_64bit.req_ptr =
+				(uintptr_t)req->cmd_req_buf;
+			send_data_req_64bit.rsp_ptr =
+				(uintptr_t)req->resp_buf;
+		}
+		send_data_req_64bit.req_len = req->cmd_req_len;
+		send_data_req_64bit.rsp_len = req->resp_len;
+		/* check if 32bit app's phys_addr region is under 4GB.*/
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((send_data_req_64bit.req_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+			(send_data_req_64bit.rsp_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+				data->client.app_name,
+				send_data_req_64bit.req_ptr,
+				send_data_req_64bit.req_len,
+				send_data_req_64bit.rsp_ptr,
+				send_data_req_64bit.rsp_len);
+			return -EFAULT;
+		}
+		send_data_req_64bit.sglistinfo_ptr =
+				(uint64_t)data->sglistinfo_shm.paddr;
+		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+		cmd_buf = (void *)&send_data_req_64bit;
+		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+	}
+
+	if (!qseecom.whitelist_support || data->use_legacy_cmd)
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+	if (data->client.dmabuf) {
+		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+
+	if (data->client.dmabuf) {
+		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto exit;
+		}
+	}
+exit:
+	return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	ret = __qseecom_send_cmd(data, &req, false);
+
+	return ret;
+}
+
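+/*
+ * Check that an ion fd's cmd_buf_offset leaves room for an entry of the
+ * given size within the request (or listener response) buffer and does not
+ * overlap the patch location of any other fd.
+ */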
+static int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+			struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i, size_t size)
+{
+	char *curr_field = NULL;
+	char *temp_field = NULL;
+	int j = 0;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+		if ((req->cmd_req_len < size) ||
+			(req->ifd_data[i].cmd_buf_offset >
+			req->cmd_req_len - size)) {
+			pr_err("Invalid offset (req len) 0x%x\n",
+				req->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+
+		curr_field = (char *) (req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset);
+		for (j = 0; j < MAX_ION_FD; j++) {
+			if ((req->ifd_data[j].fd > 0) && i != j) {
+				temp_field = (char *) (req->cmd_req_buf +
+						req->ifd_data[j].cmd_buf_offset);
+				if (temp_field >= curr_field && temp_field <
+					(curr_field + size)) {
+					pr_err("Invalid field offset 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					return -EINVAL;
+				}
+			}
+		}
+	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+		if ((lstnr_resp->resp_len < size) ||
+			(lstnr_resp->ifd_data[i].cmd_buf_offset >
+			lstnr_resp->resp_len - size)) {
+			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+
+		curr_field = (char *) (lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+		for (j = 0; j < MAX_ION_FD; j++) {
+			if ((lstnr_resp->ifd_data[j].fd > 0) && i != j) {
+				temp_field = (char *) lstnr_resp->resp_buf_ptr +
+						lstnr_resp->ifd_data[j].cmd_buf_offset;
+				if (temp_field >= curr_field && temp_field <
+					(curr_field + size)) {
+					pr_err("Invalid lstnr field offset 0x%x\n",
+					lstnr_resp->ifd_data[i].cmd_buf_offset);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+	return 0;
+}
+
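+/*
+ * Patch the command buffer with DMA addresses (a single address or a 32-bit
+ * sg-entry list) for every ion fd referenced by the request, record
+ * sglistinfo entries, and perform the required cache operations; on cleanup,
+ * zero the patched fields instead.
+ */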
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr = NULL;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ion_fd = req->ifd_data[i].fd;
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ion_fd = lstnr_resp->ifd_data[i].fd;
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries\n");
+			pr_err(" (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint32_t *update;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint32_t)))
+				goto err;
+
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				/*
+				 * Check if sg list phy add region is under 4GB
+				 */
+				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+					(!cleanup) &&
+					((uint64_t)sg_dma_address(sg_ptr->sgl)
+					>= PHY_ADDR_4G - sg->length)) {
+					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+						data->client.app_name,
+						&(sg_dma_address(sg_ptr->sgl)),
+						sg->length);
+					goto err;
+				}
+				update = (uint32_t *) field;
+				*update = cleanup ? 0 :
+					(uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry *update;
+			int j = 0;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i,
+				(SG_ENTRY_SZ * sg_ptr->nents)))
+				goto err;
+
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				update = (struct qseecom_sg_entry *)field;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					/*
+					 * Check if sg list PA is under 4GB
+					 */
+					if ((qseecom.qsee_version >=
+						QSEE_VERSION_40) &&
+						(!cleanup) &&
+						((uint64_t)(sg_dma_address(sg))
+						>= PHY_ADDR_4G - sg->length)) {
+						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+							data->client.app_name,
+							&(sg_dma_address(sg)),
+							sg->length);
+						goto err;
+					}
+					update->phys_addr = cleanup ? 0 :
+						(uint32_t)sg_dma_address(sg);
+					update->len = cleanup ? 0 : sg->length;
+					update++;
+					len += sg->length;
+					sg = sg_next(sg);
+				}
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+		}
+
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the kbuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(sg_ptr)) {
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		MAKE_NULL(sg_ptr, attach, dmabuf);
+	}
+	return -ENOMEM;
+}
+
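+/*
+ * When an fd has more scatterlist entries than QSEECOM_MAX_SG_ENTRY,
+ * allocate a coherent buffer holding the full 64-bit sg list and write a
+ * version-2 header at the patch location pointing to it.
+ */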
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the left sg entries into new allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+
+	return 0;
+}
+
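+/*
+ * 64-bit variant of __qseecom_update_cmd_buf(): patches 64-bit addresses and
+ * falls back to an external sg-list buffer when the entry count exceeds
+ * QSEECOM_MAX_SG_ENTRY.
+ */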
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ion_fd = req->ifd_data[i].fd;
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ion_fd = lstnr_resp->ifd_data[i].fd;
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries\n");
+			pr_warn(" (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.dev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			sg = sg_ptr->sgl;
+			goto cleanup;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint64_t *update_64bit;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint64_t)))
+				goto err;
+
+			/* 64bit app uses 64bit address */
+			update_64bit = (uint64_t *) field;
+			*update_64bit = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg_ptr->sgl);
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry_64bit *update_64bit;
+			int j = 0;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i,
+				(SG_ENTRY_SZ_64BIT * sg_ptr->nents)))
+				goto err;
+			/* 64bit app uses 64bit address */
+			update_64bit = (struct qseecom_sg_entry_64bit *)field;
+			for (j = 0; j < sg_ptr->nents; j++) {
+				update_64bit->phys_addr = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg);
+				update_64bit->len = cleanup ? 0 :
+						(uint32_t)sg->length;
+				update_64bit++;
+				len += sg->length;
+				sg = sg_next(sg);
+			}
+		}
+cleanup:
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* unmap the dmabuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	for (i = 0; i < MAX_ION_FD; i++)
+		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			data->client.sec_buf_fd[i].vbase)
+			dma_free_coherent(qseecom.dev,
+				data->client.sec_buf_fd[i].size,
+				data->client.sec_buf_fd[i].vbase,
+				data->client.sec_buf_fd[i].pbase);
+	if (!IS_ERR_OR_NULL(sg_ptr)) {
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		MAKE_NULL(sg_ptr, attach, dmabuf);
+	}
+	return -ENOMEM;
+}
+
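+/*
+ * Send-modfd path: copy the request/response out of the shared buffer into a
+ * coherent kernel buffer, patch the ion fd addresses, send the command using
+ * physical addresses, then copy the response back and clean up the patched
+ * fields.
+ */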
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp,
+					bool is_64bit_addr)
+{
+	int ret = 0;
+	int i;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+	void *origin_req_buf_kvirt, *origin_rsp_buf_kvirt;
+	phys_addr_t pa;
+	u8 *va = NULL;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	if (__validate_send_cmd_inputs(data, &send_cmd_req))
+		return -EINVAL;
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, req.ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	/*Back up original address */
+	origin_req_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
+				(uintptr_t)req.cmd_req_buf);
+	origin_rsp_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
+				(uintptr_t)req.resp_buf);
+
+	/* Allocate kernel buffer for request and response*/
+	ret = __qseecom_alloc_coherent_buf(req.cmd_req_len + req.resp_len,
+					&va, &pa);
+	if (ret) {
+		pr_err("Failed to allocate coherent buf, ret %d\n", ret);
+		return ret;
+	}
+
+	req.cmd_req_buf = va;
+	send_cmd_req.cmd_req_buf = (void *)pa;
+
+	req.resp_buf = va + req.cmd_req_len;
+	send_cmd_req.resp_buf = (void *)pa + req.cmd_req_len;
+
+	/* Copy the data to kernel request and response buffers*/
+	memcpy(req.cmd_req_buf, origin_req_buf_kvirt, req.cmd_req_len);
+	memcpy(req.resp_buf, origin_rsp_buf_kvirt, req.resp_len);
+
+	if (!is_64bit_addr) {
+		ret = __qseecom_update_cmd_buf(&req, false, data);
+		if (ret)
+			goto out;
+		ret = __qseecom_send_cmd(data, &send_cmd_req, true);
+		if (ret)
+			goto out;
+		ret = __qseecom_update_cmd_buf(&req, true, data);
+		if (ret)
+			goto out;
+	} else {
+		ret = __qseecom_update_cmd_buf_64(&req, false, data);
+		if (ret)
+			goto out;
+		ret = __qseecom_send_cmd(data, &send_cmd_req, true);
+		if (ret)
+			goto out;
+		ret = __qseecom_update_cmd_buf_64(&req, true, data);
+		if (ret)
+			goto out;
+	}
+
+	/*Copy the response back to the userspace buffer*/
+	memcpy(origin_rsp_buf_kvirt, req.resp_buf, req.resp_len);
+	memcpy(origin_req_buf_kvirt, req.cmd_req_buf, req.cmd_req_len);
+
+out:
+	if (req.cmd_req_buf)
+		__qseecom_free_coherent_buf(req.cmd_req_len + req.resp_len,
+			req.cmd_req_buf, (phys_addr_t)send_cmd_req.cmd_req_buf);
+
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	int ret;
+
+	ret = (svc->rcv_req_flag == 1);
+	return ret || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	mutex_lock(&listener_access_lock);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (!this_lstnr) {
+		pr_err("Invalid listener ID\n");
+		mutex_unlock(&listener_access_lock);
+		return -ENODATA;
+	}
+	mutex_unlock(&listener_access_lock);
+
+	while (1) {
+		if (wait_event_interruptible(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting Listener Service = %d\n",
+					(uint32_t)data->listener.id);
+			return -ENODEV;
+		}
+		mutex_lock(&listener_access_lock);
+		this_lstnr->rcv_req_flag = 0;
+		mutex_unlock(&listener_access_lock);
+		break;
+	}
+	return ret;
+}
+
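+/*
+ * Basic sanity check of a split-image .mdt: verify the ELF magic and class
+ * (32 or 64 bit), require at least one program header, and make sure all
+ * program headers fit within the file.
+ */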
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+	unsigned char app_arch = 0;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+	switch (app_arch) {
+	case ELFCLASS32: {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr)) {
+			pr_err("%s: Not big enough to be an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+		    sizeof(struct elf32_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	case ELFCLASS64: {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr64)) {
+			pr_err("%s: Not big enough to be an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr64->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+		    sizeof(struct elf64_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	default: {
+		pr_err("QSEE app arch %u is not supported\n", app_arch);
+		return false;
+	}
+	}
+	return true;
+}
+
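+/*
+ * Compute the total size of a split app image (<appname>.mdt plus the
+ * <appname>.bNN blobs) and report its ELF class, so the caller can size
+ * the load buffer.
+ */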
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+					uint32_t *app_arch)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = firmware_request_nowarn(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		pr_err("error with firmware_request_nowarn, rc = %d\n", rc);
+		ret = -EIO;
+		goto err;
+	}
+	if (!__qseecom_is_fw_image_valid(fw_entry)) {
+		ret = -EIO;
+		goto err;
+	}
+	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	*fw_size = fw_entry->size;
+	if (*app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (*app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, *app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		memset(fw_name, 0, sizeof(fw_name));
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
+		if (ret)
+			goto err;
+		if (*fw_size > U32_MAX - fw_entry->size) {
+			pr_err("QSEE %s app file size overflow\n", appname);
+			ret = -EINVAL;
+			goto err;
+		}
+		*fw_size += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+
+	return ret;
+err:
+	if (fw_entry)
+		release_firmware(fw_entry);
+	*fw_size = 0;
+	return ret;
+}
+
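+/*
+ * Concatenate the .mdt and the .bNN blobs of a split app image into
+ * img_data, filling in load_req->mdt_len and load_req->img_len and
+ * checking every copy against the fw_size bound.
+ */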
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+				uint32_t fw_size,
+				struct qseecom_load_app_ireq *load_req)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	u8 *img_data_ptr = img_data;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+	unsigned char app_arch = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = firmware_request_nowarn(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	load_req->img_len = fw_entry->size;
+	if (load_req->img_len > fw_size) {
+		pr_err("app %s size %zu is larger than buf size %u\n",
+			appname, fw_entry->size, fw_size);
+		ret = -EINVAL;
+		goto err;
+	}
+	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+	img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	if (app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
+		if (ret) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			goto err;
+		}
+		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+			(fw_entry->size + load_req->img_len > fw_size)) {
+			pr_err("Invalid file size for %s\n", fw_name);
+			ret = -EINVAL;
+			goto err;
+		}
+		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+		img_data_ptr = img_data_ptr + fw_entry->size;
+		load_req->img_len += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	return ret;
+err:
+	release_firmware(fw_entry);
+	return ret;
+}
+
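+/* Allocate a page-granular DMA-coherent buffer from the qseecom device. */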
+static int __qseecom_alloc_coherent_buf(
+			uint32_t size, u8 **vaddr, phys_addr_t *paddr)
+{
+	dma_addr_t coh_pmem;
+	void *buf = NULL;
+
+	/* Allocate a contiguous kernel buffer */
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	*vaddr = buf;
+	*paddr = coh_pmem;
+	return 0;
+}
+
+static void __qseecom_free_coherent_buf(uint32_t size,
+				u8 *vaddr, phys_addr_t paddr)
+{
+	if (!vaddr)
+		return;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	dma_free_coherent(qseecom.dev, size, vaddr, paddr);
+}
+
+
+#if IS_ENABLED(CONFIG_QSEECOM)
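+/*
+ * Load an app image from the firmware filesystem on behalf of a kernel
+ * client: load cmnlib/cmnlib64 first if needed, stage the image in a
+ * DMA-coherent buffer, and issue QSEOS_APP_START_COMMAND (32- or 64-bit
+ * variant depending on the QSEE version) to obtain the app_id from TZ.
+ */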
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+				uint32_t *app_id)
+{
+	int ret = -1;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!data || !appname || !app_id) {
+		pr_err("Null pointer to data or appname or appid\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+		return -EIO;
+	data->client.app_arch = app_arch;
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!(qseecom.commonlib_loaded ||
+				qseecom.commonlib_loaded_by_hostvm) &&
+				app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!(qseecom.commonlib64_loaded ||
+				qseecom.commonlib_loaded_by_hostvm) &&
+				app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
+	if (ret)
+		return ret;
+
+	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+
+	/* Populate the load_req parameters */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req.mdt_len = load_req.mdt_len;
+		load_req.img_len = load_req.img_len;
+		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		load_req_64bit.img_len = load_req.img_len;
+		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("incomp_cmd err %d, %d, unload %d %s\n",
+				ret, resp.result, resp.data, appname);
+			__qseecom_unload_app(data, resp.data);
+			ret = -EFAULT;
+		} else {
+			*app_id = resp.data;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	if (img_data)
+		__qseecom_free_coherent_buf(fw_size, img_data, pa);
+	return ret;
+}
+#endif
+
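+/*
+ * Load the common library (cmnlib/cmnlib64) into a DMA-coherent buffer and
+ * issue QSEOS_LOAD_SERV_IMAGE_COMMAND, with bus bandwidth and clock votes
+ * held around the SCM call.
+ */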
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name)
+{
+	int ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!cmnlib_name) {
+		pr_err("cmnlib_name is NULL\n");
+		return -EINVAL;
+	}
+	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+			cmnlib_name, strlen(cmnlib_name));
+		return -EINVAL;
+	}
+
+	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+		return -EIO;
+
+	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
+	if (ret)
+		return -EIO;
+
+	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.phy_addr = (uint32_t)pa;
+		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+	} else {
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		load_req_64bit.img_len = load_req.img_len;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+							&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed w/response result%d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	case  QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			goto exit_disable_clk_vote;
+		}
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	if (img_data)
+		__qseecom_free_coherent_buf(fw_size, img_data, pa);
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the remaining parameters */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to unload the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm call failed, resp.result QSEOS_RESULT_FAILURE\n");
+			break;
+		default:
+			pr_err("scm call return unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_QSEECOM)
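+/*
+ * Kernel-client entry point: look up the app by name, reuse it (bumping
+ * the refcount) if it is already loaded or load its firmware otherwise,
+ * allocate the client's shared buffer, and register the handle on the
+ * kernel client list.
+ */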
+static int __qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size)
+{
+	int32_t ret = 0;
+	unsigned long flags = 0;
+	struct qseecom_dev_handle *data = NULL;
+	struct qseecom_check_app_ireq app_ireq;
+	struct qseecom_registered_app_list *entry = NULL;
+	struct qseecom_registered_kclient_list *kclient_entry = NULL;
+	bool found_app = false;
+	phys_addr_t pa = 0;
+	u8 *va = NULL;
+	uint32_t fw_size, app_arch;
+	uint32_t app_id = 0;
+
+	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+	if (!app_name) {
+		pr_err("failed to get the app name\n");
+		return -EINVAL;
+	}
+
+	if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
+		return -EINVAL;
+	}
+
+	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+	if (!(*handle))
+		return -ENOMEM;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+
+	mutex_lock(&app_access_lock);
+
+	data->abort = 0;
+	data->type = QSEECOM_CLIENT_APP;
+	data->released = false;
+	data->client.sb_length = size;
+	data->client.user_virt_sb_base = 0;
+	data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&data->sglistinfo_shm.paddr,
+				&data->sglistinfo_shm);
+	if (!data->sglistinfo_ptr) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	init_waitqueue_head(&data->abort_wq);
+
+	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+	ret = __qseecom_check_app_exists(app_ireq, &app_id);
+	if (ret)
+		goto err;
+
+	strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+	if (app_id) {
+		pr_warn("App id %d for [%s] app exists\n", app_id,
+			(char *)app_ireq.app_name);
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+						app_id, app_ireq.app_name);
+					ret = -EINVAL;
+					goto err;
+				}
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+					app_id, (char *)app_ireq.app_name);
+	} else {
+		/* load the app and get the app_id  */
+		pr_debug("%s: Loading app for the first time\n",
+				qseecom.pdev->init_name);
+		ret = __qseecom_load_fw(data, app_name, &app_id);
+		if (ret < 0)
+			goto err;
+	}
+	data->client.app_id = app_id;
+	if (!found_app) {
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret =  -ENOMEM;
+			goto err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+			ret = -EIO;
+			kfree(entry);
+			goto err;
+		}
+		entry->app_arch = app_arch;
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		entry->check_block = 0;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+	}
+
+	/* Get the physical address of the req/resp buffer */
+	ret = __qseecom_alloc_coherent_buf(size, &va, &pa);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto err;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = va;
+	data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+	data->client.sb_phys = (phys_addr_t)pa;
+	(*handle)->dev = (void *)data;
+	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+	(*handle)->sbuf_len = data->client.sb_length;
+
+	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+	if (!kclient_entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	kclient_entry->handle = *handle;
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_add_tail(&kclient_entry->list,
+			&qseecom.registered_kclient_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
+	return 0;
+
+err:
+	__qseecom_free_coherent_buf(size, va, pa);
+	__qseecom_free_tzbuf(&data->sglistinfo_shm);
+	kfree(data);
+	kfree(*handle);
+	*handle = NULL;
+	mutex_unlock(&app_access_lock);
+	__wakeup_unload_app_kthread();
+	return ret;
+}
+
+static int __qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	int ret = -EINVAL;
+	struct qseecom_dev_handle *data;
+
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	bool found_handle = false;
+
+	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if ((handle == NULL)  || (*handle == NULL)) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = (struct qseecom_dev_handle *) ((*handle)->dev);
+	mutex_lock(&app_access_lock);
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+				list) {
+		if (kclient->handle == (*handle)) {
+			list_del(&kclient->list);
+			found_handle = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+	if (!found_handle)
+		pr_err("Unable to find the handle, exiting\n");
+	else
+		ret = qseecom_unload_app(data, false);
+
+	mutex_unlock(&app_access_lock);
+	if (ret == 0) {
+		if (data->client.sb_virt)
+			__qseecom_free_coherent_buf(data->client.sb_length,
+				data->client.sb_virt, data->client.sb_phys);
+		__qseecom_free_tzbuf(&data->sglistinfo_shm);
+		kfree_sensitive(data);
+		kfree_sensitive(*handle);
+		kfree_sensitive(kclient);
+		*handle = NULL;
+	}
+	__wakeup_unload_app_kthread();
+	return ret;
+}
+
+static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req = {NULL, 0, NULL, 0};
+	struct qseecom_dev_handle *data;
+	bool perf_enabled = false;
+
+	__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if (handle == NULL) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = handle->dev;
+
+	req.cmd_req_len = sbuf_len;
+	req.resp_len = rbuf_len;
+	req.cmd_req_buf = send_buf;
+	req.resp_buf = resp_buf;
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
+	}
+	/*
+	 * On targets where crypto clock is handled by HLOS,
+	 * if clk_access_cnt is zero and perf_enabled is false,
+	 * then the crypto clock was not enabled before sending the cmd
+	 * to TZ, so qseecom enables the clock here to avoid a service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled!\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+						ret);
+			mutex_unlock(&app_access_lock);
+			return -EINVAL;
+		}
+		perf_enabled = true;
+	}
+	if (!strcmp(data->client.app_name, "securemm"))
+		data->use_legacy_cmd = true;
+
+	ret = __qseecom_send_cmd(data, &req, false);
+
+	data->use_legacy_cmd = false;
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+
+	mutex_unlock(&app_access_lock);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+
+#if IS_ENABLED(CONFIG_QSEECOM)
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+static const struct qseecom_drv_ops qseecom_driver_ops = {
+	.qseecom_send_command = __qseecom_send_command,
+	.qseecom_start_app = __qseecom_start_app,
+	.qseecom_shutdown_app = __qseecom_shutdown_app,
+};
+
+int get_qseecom_kernel_fun_ops(void)
+{
+	return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
+}
+
+#else
+
+int qseecom_start_app(struct qseecom_handle **handle,
+			char *app_name, uint32_t size)
+{
+	return __qseecom_start_app(handle, app_name, size);
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	return __qseecom_shutdown_app(handle);
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	return __qseecom_send_command(handle, send_buf, sbuf_len,
+					resp_buf, rbuf_len);
+}
+EXPORT_SYMBOL(qseecom_send_command);
+#endif
+#endif
+
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	int ret = 0;
+
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high) {
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(handle->dev,
+									HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(handle->dev);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		}
+	} else {
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
+int qseecom_process_listener_from_smcinvoke(uint32_t *result,
+		u64 *response_type, unsigned int *data)
+{
+	struct qseecom_registered_app_list dummy_app_entry;
+	struct qseecom_dev_handle dummy_private_data = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	if (!result || !response_type || !data) {
+		pr_err("input parameter NULL\n");
+		return -EINVAL;
+	}
+
+	memset((void *)&dummy_app_entry, 0, sizeof(dummy_app_entry));
+	/*
+	 * smcinvoke expects result in scm call resp.ret[1] and type in ret[0],
+	 * while qseecom expects result in ret[0] and type in ret[1].
+	 * To simplify API interface and code changes in smcinvoke, here
+	 * internally switch result and resp_type to let qseecom work with
+	 * smcinvoke and upstream scm driver protocol.
+	 */
+	resp.result = *response_type;
+	resp.resp_type = *result;
+	resp.data = *data;
+
+	dummy_private_data.client.app_id = *response_type;
+	dummy_private_data.client.from_smcinvoke = true;
+	dummy_app_entry.app_id = *response_type;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.qsee_reentrancy_support)
+		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+					&dummy_private_data);
+	else
+		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
+					&resp);
+	mutex_unlock(&app_access_lock);
+	if (ret)
+		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
+			resp.result, resp.data, resp.resp_type, ret);
+	*result = resp.resp_type;
+	*response_type = resp.result;
+	*data = resp.data;
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+#endif
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+			struct qseecom_send_modfd_listener_resp *resp,
+			struct qseecom_registered_listener_list *this_lstnr)
+{
+	int i;
+
+	if (!data || !resp || !this_lstnr) {
+		pr_err("listener handle or resp msg is null\n");
+		return -EINVAL;
+	}
+
+	if (resp->resp_buf_ptr == NULL) {
+		pr_err("resp buffer is null\n");
+		return -EINVAL;
+	}
+	/* validate resp buf length */
+	if ((resp->resp_len == 0) ||
+			(resp->resp_len > this_lstnr->sb_length)) {
+		pr_err("resp buf length %d not valid\n", resp->resp_len);
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)this_lstnr->user_virt_sb_base >
+					(ULONG_MAX - this_lstnr->sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	/* validate resp buf */
+	if (((uintptr_t)resp->resp_buf_ptr <
+		(uintptr_t)this_lstnr->user_virt_sb_base) ||
+		((uintptr_t)resp->resp_buf_ptr >=
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+				this_lstnr->sb_length)) ||
+		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+						this_lstnr->sb_length))) {
+		pr_err("resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+				void __user *argp, bool is_64bit_addr)
+{
+	struct qseecom_send_modfd_listener_resp resp;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+
+	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+		return -EINVAL;
+
+	resp.resp_buf_ptr = this_lstnr->sb_virt +
+		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+	if (!is_64bit_addr)
+		__qseecom_update_cmd_buf(&resp, false, data);
+	else
+		__qseecom_update_cmd_buf_64(&resp, false, data);
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
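+/*
+ * Reference-counted enable of the CE core, interface and bus clocks for
+ * the selected instance (CLK_QSEE or CLK_CE_DRV); the clocks are only
+ * prepared/enabled on the 0 -> 1 transition.
+ */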
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct qseecom_clk *qclk = NULL;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	if (ce == CLK_CE_DRV)
+		qclk = &qseecom.ce_drv;
+
+	if (qclk == NULL) {
+		pr_err("CLK type not supported\n");
+		return -EINVAL;
+	}
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == ULONG_MAX) {
+		pr_err("clk_access_cnt beyond limitation\n");
+		goto err;
+	}
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt++;
+		mutex_unlock(&clk_access_lock);
+		return rc;
+	}
+
+	/* Enable CE core clk */
+	if (qclk->ce_core_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto err;
+		}
+	}
+	/* Enable CE clk */
+	if (qclk->ce_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto ce_clk_err;
+		}
+	}
+	/* Enable AXI clk */
+	if (qclk->ce_bus_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE bus clk\n");
+			goto ce_bus_clk_err;
+		}
+	}
+	qclk->clk_access_cnt++;
+	mutex_unlock(&clk_access_lock);
+	return 0;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk != NULL)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk != NULL)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == 0) {
+		mutex_unlock(&clk_access_lock);
+		return;
+	}
+
+	if (qclk->clk_access_cnt == 1) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+	qclk->clk_access_cnt--;
+	mutex_unlock(&clk_access_lock);
+}
+
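+/*
+ * Vote DFAB or SFPB bandwidth with the QSEE perf client. The bus-scale
+ * level depends on whether the other vote type is already active, and the
+ * QSEE clock is enabled first when the CE core source clock is owned by
+ * HLOS.
+ */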
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	qclk = &qseecom.qsee;
+	if (!qseecom.qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_bw_count) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = qseecom_bus_scale_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					qseecom_bus_scale_update_request(
+						qseecom.qsee_perf_client, 1);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count++;
+				data->perf_enabled = true;
+			}
+		} else {
+			qseecom.qsee_bw_count++;
+			data->perf_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_sfpb_bw_count) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = qseecom_bus_scale_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					qseecom_bus_scale_update_request(
+						qseecom.qsee_perf_client, 2);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count++;
+				data->fast_load_enabled = true;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count++;
+			data->fast_load_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int32_t ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		return;
+	if (!qseecom.qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error. Extra call to disable DFAB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+
+		if (qseecom.qsee_bw_count == 1) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = qseecom_bus_scale_update_request(
+					qseecom.qsee_perf_client, 2);
+			else {
+				ret = qseecom_bus_scale_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count--;
+				data->perf_enabled = false;
+			}
+		} else {
+			qseecom.qsee_bw_count--;
+			data->perf_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error. Extra call to disable SFPB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+		if (qseecom.qsee_sfpb_bw_count == 1) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = qseecom_bus_scale_update_request(
+						qseecom.qsee_perf_client, 1);
+			else {
+				ret = qseecom_bus_scale_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count--;
+				data->fast_load_enabled = false;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count--;
+			data->fast_load_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+
+}
+
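+/*
+ * Load an external ELF image supplied through an ion fd: map the buffer,
+ * validate the mdt/img lengths, clean the cache, issue
+ * QSEOS_LOAD_EXTERNAL_ELF_COMMAND, then invalidate the cache and process
+ * the TZ response.
+ */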
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_load_img_req load_img_req;
+	int uret = 0;
+	int ret = 0;
+	phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sg_table *sgt = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct dma_buf *dmabuf = NULL;
+	void *va = NULL;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, &pa, &va,
+					&sgt, &attach, &len, &dmabuf);
+	if (ret) {
+		pr_err("Failed to map vaddr for ion_fd %d\n",
+			load_img_req.ifd_data_fd);
+		return -ENOMEM;
+	}
+	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+				len, load_img_req.mdt_len,
+				load_img_req.img_len);
+		ret = -EINVAL;
+		goto exit_cpu_restore;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req_64bit.mdt_len = load_img_req.mdt_len;
+		load_req_64bit.img_len = load_img_req.img_len;
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_cpu_restore;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_register_bus_bandwidth_needs;
+	}
+	ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+	/*  SCM_CALL to load the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto exit_disable_clock;
+	}
+
+	ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_err("%s: qseos result incomplete\n", __func__);
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("scm_call response result %d not supported\n",
+							resp.result);
+		ret = -EFAULT;
+		break;
+	}
+
+exit_disable_clock:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		uret = qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+		if (uret)
+			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+								uret, ret);
+	}
+
+exit_cpu_restore:
+	if (dmabuf) {
+		qseecom_vaddr_unmap(va, sgt, attach, dmabuf);
+		MAKE_NULL(sgt, attach, dmabuf);
+	}
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+
+	/* unavailable client app */
+	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL to unload the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to unload image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_unload_external_elf_scm_err:
+	return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int32_t ret = 0;
+	struct qseecom_qseos_app_load_query query_req = { {0} };
+	struct qseecom_check_app_ireq req;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	uint32_t app_arch = 0, app_id = 0;
+	bool found_app = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&query_req, (void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		ret = -EFAULT;
+		goto exit_free;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret) {
+		pr_err("scm call to check if app is loaded failed\n");
+		goto exit_free;
+	}
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				app_arch = entry->app_arch;
+				if (entry->ref_cnt == U32_MAX) {
+					pr_err("App %d (%s) ref_cnt overflow\n",
+						app_id, req.app_name);
+					ret = -EINVAL;
+					spin_unlock_irqrestore(
+					&qseecom.registered_app_list_lock,
+					flags);
+					goto exit_free;
+				}
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		data->client.app_id = app_id;
+		query_req.app_id = app_id;
+		if (app_arch) {
+			data->client.app_arch = app_arch;
+			query_req.app_arch = app_arch;
+		} else {
+			data->client.app_arch = 0;
+			query_req.app_arch = 0;
+		}
+		strlcpy(data->client.app_name, query_req.app_name,
+				MAX_APP_NAME_SIZE);
+		/*
+		 * If app was loaded by appsbl before and was not registered,
+		 * register this app now.
+		 */
+		if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry) {
+				ret = -ENOMEM;
+				goto exit_free;
+			}
+			entry->app_id = app_id;
+			entry->ref_cnt = 1;
+			entry->app_arch = data->client.app_arch;
+			strlcpy(entry->app_name, data->client.app_name,
+				MAX_APP_NAME_SIZE);
+			entry->app_blocked = false;
+			entry->blocked_on_listener_id = 0;
+			entry->check_block = 0;
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+				flags);
+			list_add_tail(&entry->list,
+				&qseecom.registered_app_list_head);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		}
+		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+			pr_err("copy_to_user failed\n");
+			ret = -EFAULT;
+			goto exit_free;
+		}
+		ret = -EEXIST;	/* app already loaded */
+		goto exit_free;
+	}
+
+exit_free:
+	return ret;	/* app not loaded */
+}
+
+static int __qseecom_get_ce_pipe_info(
+			enum qseecom_key_management_usage_type usage,
+			uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+	int ret = -EINVAL;
+	int i, j;
+	struct qseecom_ce_info_use *p = NULL;
+	int total = 0;
+	struct qseecom_ce_pipe_entry *pcepipe;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < total; j++) {
+		if (p->unit_num == unit) {
+			pcepipe =  p->ce_pipe_entry;
+			for (i = 0; i < p->num_ce_pipe_entries; i++) {
+				(*ce_hw)[i] = pcepipe->ce_num;
+				*pipe = pcepipe->ce_pipe_pair;
+				pcepipe++;
+			}
+			ret = 0;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_generate_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_generate_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+			pr_debug("Key ID exists.\n");
+			ret = 0;
+		} else {
+			pr_err("scm call to generate key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto generate_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+		pr_debug("Key ID exists.\n");
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+				pr_debug("Key ID exists.\n");
+				ret = 0;
+			} else {
+				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("gen key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+generate_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_delete_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_delete_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else {
+			pr_err("scm call to delete key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto del_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Delete key scm call failed resp.result %d\n",
+							resp.result);
+		ret = -EINVAL;
+		break;
+	}
+del_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_select_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+		ret = __qseecom_enable_clk(CLK_CE_DRV);
+		if (ret)
+			return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_select_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+				ret);
+			ret = -EFAULT;
+		}
+		goto set_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Set Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Set Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+set_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+		__qseecom_disable_clk(CLK_CE_DRV);
+	return ret;
+}
+
+static int __qseecom_update_current_key_user_info(
+			struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_userinfo_update_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+				usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+		&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to update key userinfo failed: %d\n",
+									ret);
+			__qseecom_disable_clk(CLK_QSEE);
+			return -EFAULT;
+		}
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (resp.result ==
+			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		}
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Update Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+
+static int qseecom_enable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", true);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+	return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", false);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+	return ret;
+}
+
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	int i;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		p = qseecom.ce_info.fde;
+		total = qseecom.ce_info.num_fde;
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		p = qseecom.ce_info.pfe;
+		total = qseecom.ce_info.num_pfe;
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+
+	for (i = 0; i < total; i++) {
+		if (p->unit_num == unit) {
+			pce_info_use = p;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use) {
+		pr_err("cannot find unit %d\n", unit);
+		return -EINVAL;
+	}
+	return pce_info_use->num_ce_pipe_entries;
+}
+
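+/*
+ * Generate a storage encryption key for the requested usage and set it on
+ * every CE pipe of the default unit (or on the ICE key index for UFS/SDCC
+ * ICE), retrying while TZ reports the set-key operation as pending.
+ */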
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int i;
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_create_key_req create_key_req;
+	struct qseecom_key_generate_ireq generate_key_ireq;
+	struct qseecom_key_select_ireq set_key_ireq;
+	int32_t entries = 0;
+
+	ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", create_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					create_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, create_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+			DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	if (qseecom.enable_key_wrap_in_ks)
+		flags |= ENABLE_KEY_WRAP_IN_KS;
+
+	generate_key_ireq.flags = flags;
+	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+	memset((void *)generate_key_ireq.key_id,
+			0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)generate_key_ireq.hash32,
+			0, QSEECOM_HASH_SIZE);
+	memcpy((void *)generate_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)generate_key_ireq.hash32,
+			(void *)create_key_req.hash32,
+			QSEECOM_HASH_SIZE);
+
+	ret = __qseecom_generate_and_save_key(data,
+			create_key_req.usage, &generate_key_ireq);
+	if (ret) {
+		pr_err("Failed to generate key on storage: %d\n", ret);
+		goto free_buf;
+	}
+
+	for (i = 0; i < entries; i++) {
+		set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else if (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else {
+			set_key_ireq.ce = ce_hw[i];
+			set_key_ireq.pipe = pipe;
+		}
+		set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS */
+		set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+		memcpy((void *)set_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)set_key_ireq.hash32,
+				(void *)create_key_req.hash32,
+				QSEECOM_HASH_SIZE);
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE-based
+		 * crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(create_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		do {
+			ret = __qseecom_set_clear_ce_key(data,
+					create_key_req.usage,
+					&set_key_ireq);
+			/*
+			 * wait a little before calling scm again to let other
+			 * processes run
+			 */
+			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+				msleep(50);
+
+		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+		qseecom_disable_ice_setup(create_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[i], ret);
+			goto free_buf;
+		} else {
+			pr_err("Set the key successfully\n");
+			if ((create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+			     (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+				goto free_buf;
+		}
+	}
+
+free_buf:
+	kfree_sensitive(ce_hw);
+	return ret;
+}
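The key-set loop above keeps reissuing the SCM call while TZ reports QSEOS_RESULT_FAIL_PENDING_OPERATION, sleeping 50 ms between attempts so other processes can run. A minimal sketch of that retry idiom factored into a standalone helper follows; the helper name __retry_while_tz_pending is hypothetical, and only msleep() plus the driver's QSEOS_RESULT_FAIL_PENDING_OPERATION constant are assumed.

/*
 * Sketch (not part of the driver): retry a TZ operation while it reports a
 * pending-operation status, sleeping between attempts exactly as the
 * create/wipe key paths above do. The callback is assumed to return a
 * negative errno, 0, or QSEOS_RESULT_FAIL_PENDING_OPERATION.
 */
static int __retry_while_tz_pending(int (*op)(void *ctx), void *ctx)
{
	int ret;

	do {
		ret = op(ctx);
		/* let other processes run before calling scm again */
		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
			msleep(50);
	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);

	return ret;
}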
+
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	int i, j;
+	struct qseecom_wipe_key_req wipe_key_req;
+	struct qseecom_key_delete_ireq delete_key_ireq;
+	struct qseecom_key_select_ireq clear_key_ireq;
+	int32_t entries = 0;
+
+	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", wipe_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					wipe_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+				DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (wipe_key_req.wipe_key_flag) {
+		delete_key_ireq.flags = flags;
+		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)delete_key_ireq.key_id,
+			(void *)key_id_array[wipe_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+					&delete_key_ireq);
+		if (ret) {
+			pr_err("Failed to delete key from ssd storage: %d\n",
+				ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+	for (j = 0; j < entries; j++) {
+		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (wipe_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else if (wipe_key_req.usage ==
+			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else {
+			clear_key_ireq.ce = ce_hw[j];
+			clear_key_ireq.pipe = pipe;
+		}
+		clear_key_ireq.flags = flags;
+		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 if this is a GPCE-based
+		 * crypto instance or if ICE is set up properly.
+		 */
+		ret = qseecom_enable_ice_setup(wipe_key_req.usage);
+		if (ret)
+			goto free_buf;
+
+		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+					&clear_key_ireq);
+
+		qseecom_disable_ice_setup(wipe_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[j], ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+free_buf:
+	kfree_sensitive(ce_hw);
+	return ret;
+}
+
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_update_key_userinfo_req update_key_req;
+	struct qseecom_key_userinfo_update_ireq ireq;
+
+	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
+		return -EFAULT;
+	}
+
+	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	ireq.flags = flags;
+	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.key_id,
+		(void *)key_id_array[update_key_req.usage].desc,
+		QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)ireq.current_hash32,
+		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.new_hash32,
+		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+	do {
+		ret = __qseecom_update_current_key_user_info(data,
+						update_key_req.usage,
+						&ireq);
+		/*
+		 * wait a little before calling scm again to let other
+		 * processes run
+		 */
+		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+			msleep(50);
+
+	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret)
+		pr_err("Failed to update key info: %d\n", ret);
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+	struct qseecom_is_es_activated_req req = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+		&req, sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call failed\n");
+		return ret;
+	}
+
+	req.is_activated = resp.result;
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		pr_err("copy_to_user failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+	struct qseecom_save_partition_hash_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	memset(&resp, 0x00, sizeof(resp));
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+		       (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+	struct qseecom_mdtp_cipher_dip_req req;
+	u32 tzbuflenin, tzbuflenout;
+	char *tzbufin = NULL, *tzbufout = NULL;
+	struct qseecom_scm_desc desc = {0};
+	int ret;
+	phys_addr_t pain, paout;
+	struct qtee_shm shmin = {0}, shmout = {0};
+
+	do {
+		/* Copy the parameters from userspace */
+		if (argp == NULL) {
+			pr_err("arg is null\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = copy_from_user(&req, argp, sizeof(req));
+		if (ret) {
+			pr_err("copy_from_user failed, ret= %d\n", ret);
+			break;
+		}
+
+		if (req.in_buf == NULL || req.out_buf == NULL ||
+			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+				req.direction > 1) {
+			pr_err("invalid parameters\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* Copy the input buffer from userspace to kernel space */
+		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+		tzbufin = __qseecom_alloc_tzbuf(tzbuflenin, &pain, &shmin);
+		if (!tzbufin) {
+			pr_err("error allocating in buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = copy_from_user(tzbufin, (void __user *)req.in_buf,
+					req.in_buf_size);
+		if (ret) {
+			pr_err("copy_from_user failed, ret=%d\n", ret);
+			break;
+		}
+
+		qtee_shmbridge_flush_shm_buf(&shmin);
+
+		/* Prepare the output buffer in kernel space */
+		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+		tzbufout = __qseecom_alloc_tzbuf(tzbuflenout, &paout, &shmout);
+		if (!tzbufout) {
+			pr_err("error allocating out buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		qtee_shmbridge_flush_shm_buf(&shmout);
+
+		/* Send the command to TZ */
+		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+		desc.args[0] = pain;
+		desc.args[1] = req.in_buf_size;
+		desc.args[2] = paout;
+		desc.args[3] = req.out_buf_size;
+		desc.args[4] = req.direction;
+
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			break;
+
+		ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+		__qseecom_disable_clk(CLK_QSEE);
+
+		if (ret) {
+			pr_err("failed for SCM_SVC_MDTP, ret=%d\n",
+				ret);
+			break;
+		}
+
+		/* Copy the output buffer from kernel space to userspace */
+		qtee_shmbridge_flush_shm_buf(&shmout);
+		ret = copy_to_user((void __user *)req.out_buf,
+				tzbufout, req.out_buf_size);
+		if (ret) {
+			pr_err("copy_to_user failed, ret=%d\n", ret);
+			break;
+		}
+	} while (0);
+
+	__qseecom_free_tzbuf(&shmin);
+	__qseecom_free_tzbuf(&shmout);
+
+	return ret;
+}
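qseecom_mdtp_cipher_dip() above relies on a do { ... } while (0) block so that every early break still reaches the shared-buffer cleanup at the bottom. A minimal, self-contained sketch of the same single-exit idiom, assuming only kzalloc()/kfree():

/* Sketch of the single-exit do { } while (0) cleanup pattern used above. */
static int example_single_exit(size_t len)
{
	void *buf = NULL;
	int ret;

	do {
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			break;			/* still reaches the cleanup */
		}
		/* ... the real work would go here ... */
		ret = 0;
	} while (0);

	kfree(buf);				/* kfree(NULL) is a no-op */
	return ret;
}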
+
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req)
+{
+	if (!data || !data->client.sb_virt) {
+		pr_err("Client or client buf is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->type != QSEECOM_CLIENT_APP)
+		return -EFAULT;
+
+	if (req->req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+		(req->req_len + req->resp_len), data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->req_ptr <
+			data->client.user_virt_sb_base) ||
+		((uintptr_t)req->req_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->resp_ptr <
+			data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf lengtgh/response buf length not valid\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+		pr_err("Integer overflow in req_len & req_ptr\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_ptr\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->req_ptr + req->req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_ptr + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
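The validator above checks every addition for wrap-around before comparing ranges against the shared buffer. The same containment test in isolation, as a minimal sketch (hypothetical helper, mirroring the ULONG_MAX guards used above):

/*
 * Sketch: true only if [ptr, ptr + len) lies inside [base, base + size),
 * with each sum guarded against unsigned overflow before it is formed.
 */
static bool range_within_shared_buf(uintptr_t ptr, size_t len,
				    uintptr_t base, size_t size)
{
	if (ptr > ULONG_MAX - len)	/* ptr + len would wrap */
		return false;
	if (base > ULONG_MAX - size)	/* base + size would wrap */
		return false;
	return ptr >= base && (ptr + len) <= (base + size);
}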
+
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+				uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry *sg_entry;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	/*
+	 * Allocate a buffer, populate it with the number of entries plus
+	 * each sg entry's phy addr and length; then return the
+	 * phy_addr of the buffer.
+	 */
+	size = sizeof(uint32_t) +
+		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.dev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	*(uint32_t *)buf = sg_ptr->nents;
+	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
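The buffer marshalled above is a 32-bit entry count followed by packed qseecom_sg_entry records, one per scatterlist segment. A small debug-only sketch that walks that layout back out (assumes the same two 32-bit fields of qseecom_sg_entry used above):

/* Sketch: dump the table built by __qseecom_qteec_handle_pre_alc_fd(). */
static void __dump_sg_entry_table(const void *buf)
{
	uint32_t nents = *(const uint32_t *)buf;
	const struct qseecom_sg_entry *e =
		(const struct qseecom_sg_entry *)((const char *)buf + sizeof(uint32_t));
	uint32_t i;

	for (i = 0; i < nents; i++, e++)
		pr_debug("entry %u: phys 0x%x len %u\n",
			 i, e->phys_addr, e->len);
}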
+
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+			struct qseecom_dev_handle *data, bool cleanup)
+{
+	int ret = 0;
+	int i = 0;
+	uint32_t *update;
+	struct sg_table *sg_ptr = NULL;
+	struct scatterlist *sg;
+	struct qseecom_param_memref *memref;
+	int ion_fd = -1;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+
+	if (req == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			ion_fd = req->ifd_data[i].fd;
+			if ((req->req_len <
+				sizeof(struct qseecom_param_memref)) ||
+				(req->ifd_data[i].cmd_buf_offset >
+				req->req_len -
+				sizeof(struct qseecom_param_memref))) {
+				pr_err("Invalid offset/req len 0x%x/0x%x\n",
+					req->req_len,
+					req->ifd_data[i].cmd_buf_offset);
+				return -EINVAL;
+			}
+			update = (uint32_t *)((char *) req->req_ptr +
+				req->ifd_data[i].cmd_buf_offset);
+			if (!update) {
+				pr_err("update pointer is NULL\n");
+				return -EINVAL;
+			}
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
+		if (ret) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg == NULL) {
+			pr_err("sg is NULL\n");
+			goto err;
+		}
+		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
+					sg_ptr->nents, sg->length);
+			goto err;
+		}
+		/* clean up buf for pre-allocated fd */
+		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			(*update)) {
+			if (data->client.sec_buf_fd[i].vbase)
+				dma_free_coherent(qseecom.dev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			memset((void *)update, 0,
+				sizeof(struct qseecom_param_memref));
+			memset(&(data->client.sec_buf_fd[i]), 0,
+				sizeof(struct qseecom_sec_buf_fd_info));
+			goto clean;
+		}
+
+		if (*update == 0) {
+			/* update buf for pre-allocated fd from secure heap */
+			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+				sg_ptr);
+			if (ret) {
+				pr_err("Failed to handle buf for fd[%d]\n", i);
+				goto err;
+			}
+			memref = (struct qseecom_param_memref *)update;
+			memref->buffer =
+				(uint32_t)(data->client.sec_buf_fd[i].pbase);
+			memref->size =
+				(uint32_t)(data->client.sec_buf_fd[i].size);
+		} else {
+			/* update buf for fd from non-secure qseecom heap */
+			if (sg_ptr->nents != 1) {
+				pr_err("Num of scat entr (%d) invalid\n",
+					sg_ptr->nents);
+				goto err;
+			}
+			if (cleanup)
+				*update = 0;
+			else
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+		}
+clean:
+		if (cleanup) {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = qseecom_dmabuf_cache_operations(dmabuf,
+					QSEECOM_CACHE_CLEAN);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			data->sglistinfo_ptr[i].indexAndFlags =
+				SGLISTINFO_SET_INDEX_FLAG(
+				(sg_ptr->nents == 1), 0,
+				req->ifd_data[i].cmd_buf_offset);
+			data->sglistinfo_ptr[i].sizeOrCount =
+				(sg_ptr->nents == 1) ?
+				sg->length : sg_ptr->nents;
+			data->sglist_cnt = i + 1;
+		}
+		/* unmap the dmabuf */
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		sg_ptr = NULL;
+		dmabuf = NULL;
+		attach = NULL;
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(sg_ptr)) {
+		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
+		MAKE_NULL(sg_ptr, attach, dmabuf);
+	}
+	return -ENOMEM;
+}
+
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int ret2 = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret  = __qseecom_qteec_validate_msg(data, req);
+	if (ret)
+		return ret;
+
+	req_ptr = req->req_ptr;
+	resp_ptr = req->resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
+
+	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->req_ptr);
+	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req->resp_ptr);
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, false);
+		if (ret)
+			return ret;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req->req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req->resp_len;
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req->req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req->resp_len;
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((ireq_64bit.req_ptr >=
+				PHY_ADDR_4G - ireq_64bit.req_len) ||
+			(ireq_64bit.resp_ptr >=
+				PHY_ADDR_4G - ireq_64bit.resp_len))){
+			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+				data->client.app_name, data->client.app_id);
+			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+				ireq_64bit.req_ptr, ireq_64bit.req_len,
+				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+			return -EFAULT;
+		}
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+	}
+	if (qseecom.whitelist_support
+		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = cmd_id;
+
+	reqd_len_sb_in = req->req_len + req->resp_len;
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+				QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		goto exit;
+	}
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				goto exit;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+				goto exit;
+			}
+		}
+	}
+exit:
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret2 = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, true);
+		if (ret2)
+			return ret2;
+	}
+	return ret;
+}
+
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+							QSEOS_TEE_OPEN_SESSION);
+
+	return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+	return ret;
+}
+
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int i = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = copy_from_user(&req, argp,
+			sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_validate_msg(data,
+					(struct qseecom_qteec_req *)(&req));
+	if (ret)
+		return ret;
+	req_ptr = req.req_ptr;
+	resp_ptr = req.resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+	if (__qseecom_find_pending_unload_app(data->client.app_id,
+						data->client.app_name)) {
+		pr_err("app %d (%s) unload is pending\n",
+			data->client.app_id, data->client.app_name);
+		return -ENOENT;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].fd) {
+			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+				return -EINVAL;
+		}
+	}
+	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.req_ptr);
+	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_ptr);
+	ret = __qseecom_update_qteec_req_buf(&req, data, false);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req.req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req.req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
+	}
+	reqd_len_sb_in = req.req_len + req.resp_len;
+	if (qseecom.whitelist_support)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_CLEAN);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
+					QSEECOM_CACHE_INVALIDATE);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = __qseecom_update_qteec_req_buf(&req, data, true);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+						QSEOS_TEE_REQUEST_CANCELLATION);
+
+	return ret;
+}
+
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+	if (data->sglist_cnt) {
+		memset(data->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		data->sglist_cnt = 0;
+	}
+}
+
+long qseecom_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	bool perf_enabled = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		/* The state is not ready, so return "device not configured",
+		 * i.e. the operation cannot be performed on the device yet.
+		 */
+		return -ENXIO;
+	}
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
+		cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
+		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
+		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
+		__wakeup_unregister_listener_kthread();
+	__wakeup_unload_app_kthread();
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("reg lstnr req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl register_listener_req()\n");
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		data->type = QSEECOM_LISTENER_SERVICE;
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl unregister_listener_req()\n");
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			/* register bus bw in case the client doesn't do it */
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				ret = -EINVAL;
+				mutex_unlock(&app_access_lock);
+				break;
+			}
+		}
+		/*
+		 * On targets where the crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * the crypto clock was not enabled before sending the cmd to
+		 * TZ, so qseecom enables the clock to avoid a service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+		}
+		/*
+		 * On targets where the crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * the crypto clock was not enabled before sending the cmd to
+		 * TZ, so qseecom enables the clock to avoid a service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+			ret = qseecom_send_modfd_cmd(data, argp);
+		else
+			ret = qseecom_send_modfd_cmd_64(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret && (ret != -ERESTARTSYS))
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.qsee_reentrancy_support)
+			ret = qseecom_send_resp();
+		else
+			ret = qseecom_reentrancy_send_resp(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		if ((data->type != QSEECOM_CLIENT_APP) &&
+			(data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_SECURE_SERVICE)) {
+			pr_err("set mem param req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_set_client_mem_param(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Qqseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("load app req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data, false);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		__wakeup_unload_app_kthread();
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("load ext elf req: invalid client handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+			pr_err("unload ext elf req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("app loaded query req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("send cmd svc req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_SECURE_SERVICE;
+		if (qseecom.qsee_version < QSEE_VERSION_03) {
+			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_service_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("create key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Create Key feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_create_key(data, argp);
+		if (ret)
+			pr_err("failed to create encryption key: %d\n", ret);
+
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("wipe key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_wipe_key(data, argp);
+		if (ret)
+			pr_err("failed to wipe encryption key: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("update key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Update Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_update_key_user_info(data, argp);
+		if (ret)
+			pr_err("failed to update key user info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("save part hash req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_save_partition_hash(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("ES activated req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_is_es_activated(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_mdtp_cipher_dip(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&listener_access_lock);
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+			ret = qseecom_send_modfd_resp(data, argp);
+		else
+			ret = qseecom_send_modfd_resp_64(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&listener_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Open session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_open_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed open_session_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Close session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_close_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed close_session_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Invoke cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_request_cancellation(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed request_cancellation: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_free_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_ICE_INFO: {
+		struct qseecom_ice_data_t ice_data;
+
+		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
+		if (ret) {
+			pr_err("copy_from_user failed\n");
+			return -EFAULT;
+		}
+		qcom_ice_set_fde_flag(ice_data.flag);
+		break;
+	}
+	case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
+		pr_err("QSEECOM_IOCTL_FBE_CLEAR_KEY IOCTL is deprecated\n");
+		return -EINVAL;
+	}
+	default:
+		pr_err("Invalid IOCTL: 0x%x\n", cmd);
+		return -ENOIOCTLCMD;
+	}
+	return ret;
+}
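For orientation, a hedged userspace sketch of exercising one of the simpler commands dispatched above, QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ. The /dev/qseecom node name and the qseecom_qseos_version_req layout are assumptions taken from the driver's UAPI header (installed here as <linux/qseecom.h>), not anything defined in this file:

/* Hedged userspace sketch; assumes the UAPI header exposes the ioctl and
 * a struct qseecom_qseos_version_req { unsigned int qseos_version; }. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qseecom.h>

int main(void)
{
	struct qseecom_qseos_version_req req = {0};
	int fd = open("/dev/qseecom", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ, &req) < 0)
		perror("QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ");
	else
		printf("QSEOS version: 0x%x\n", req.qseos_version);
	close(fd);
	return 0;
}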
+
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	file->private_data = data;
+	data->abort = 0;
+	data->type = QSEECOM_GENERIC;
+	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	data->mode = INACTIVE;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
+				sizeof(struct sglist_info) * MAX_ION_FD,
+				&data->sglistinfo_shm.paddr,
+				&data->sglistinfo_shm);
+	if (!data->sglistinfo_ptr) {
+		kfree(data);
+		file->private_data = NULL;
+		return -ENOMEM;
+	}
+	return ret;
+}
+
+static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
+{
+	if (qseecom.no_clock_support)
+		return;
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE)
+				__qseecom_set_msm_bus_request(INACTIVE);
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+}
+
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+	bool free_private_data = true;
+
+	__qseecom_release_disable_clk(data);
+	if (!data->released) {
+		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+			data->type, data->mode, data);
+		switch (data->type) {
+		case QSEECOM_LISTENER_SERVICE:
+			pr_debug("release lsnr svc %d\n", data->listener.id);
+			mutex_lock(&listener_access_lock);
+			ret = qseecom_unregister_listener(data);
+			if (!ret)
+				free_private_data = false;
+			data->listener.release_called = true;
+			mutex_unlock(&listener_access_lock);
+			__wakeup_unregister_listener_kthread();
+			break;
+		case QSEECOM_CLIENT_APP:
+			pr_debug("release app %d (%s)\n",
+				data->client.app_id, data->client.app_name);
+			if (data->client.app_id) {
+				free_private_data = false;
+				mutex_lock(&unload_app_pending_list_lock);
+				ret = qseecom_prepare_unload_app(data);
+				mutex_unlock(&unload_app_pending_list_lock);
+				__wakeup_unload_app_kthread();
+			}
+			break;
+		case QSEECOM_SECURE_SERVICE:
+		case QSEECOM_GENERIC:
+			if (data->client.dmabuf) {
+				qseecom_vaddr_unmap(data->client.sb_virt,
+					data->client.sgt, data->client.attach,
+					data->client.dmabuf);
+				MAKE_NULL(data->client.sgt, data->client.attach,
+					data->client.dmabuf);
+			}
+			break;
+		case QSEECOM_UNAVAILABLE_CLIENT_APP:
+			break;
+		default:
+			pr_err("Unsupported clnt_handle_type %d\n",
+				data->type);
+			break;
+		}
+	}
+
+	if (free_private_data) {
+		__qseecom_free_tzbuf(&data->sglistinfo_shm);
+		kfree(data);
+	}
+	return ret;
+}
+
+static const struct file_operations qseecom_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qseecom_ioctl,
+	.open = qseecom_open,
+	.release = qseecom_release,
+};
+
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct device *pdev;
+	struct qseecom_clk *qclk;
+	char *core_clk_src = NULL;
+	char *core_clk = NULL;
+	char *iface_clk = NULL;
+	char *bus_clk = NULL;
+
+	switch (ce) {
+	case CLK_QSEE: {
+		core_clk_src = "core_clk_src";
+		core_clk = "core_clk";
+		iface_clk = "iface_clk";
+		bus_clk = "bus_clk";
+		qclk = &qseecom.qsee;
+		qclk->instance = CLK_QSEE;
+		break;
+	}
+	case CLK_CE_DRV: {
+		core_clk_src = "ce_drv_core_clk_src";
+		core_clk = "ce_drv_core_clk";
+		iface_clk = "ce_drv_iface_clk";
+		bus_clk = "ce_drv_bus_clk";
+		qclk = &qseecom.ce_drv;
+		qclk->instance = CLK_CE_DRV;
+		break;
+	}
+	default:
+		pr_err("Invalid ce hw instance: %d!\n", ce);
+		return -EIO;
+	}
+
+	if (qseecom.no_clock_support) {
+		qclk->ce_core_clk = NULL;
+		qclk->ce_clk = NULL;
+		qclk->ce_bus_clk = NULL;
+		qclk->ce_core_src_clk = NULL;
+		return 0;
+	}
+
+	pdev = qseecom.pdev;
+
+	/* Get CE3 src core clk. */
+	qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+	if (!IS_ERR(qclk->ce_core_src_clk)) {
+		rc = clk_set_rate(qclk->ce_core_src_clk,
+					qseecom.ce_opp_freq_hz);
+		if (rc) {
+			clk_put(qclk->ce_core_src_clk);
+			qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+				qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+			return -EIO;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		qclk->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	qclk->ce_core_clk = clk_get(pdev, core_clk);
+	if (IS_ERR(qclk->ce_core_clk)) {
+		rc = PTR_ERR(qclk->ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		return -EIO;
+	}
+
+	/* Get CE Interface clk */
+	qclk->ce_clk = clk_get(pdev, iface_clk);
+	if (IS_ERR(qclk->ce_clk)) {
+		rc = PTR_ERR(qclk->ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		return -EIO;
+	}
+
+	/* Get CE AXI clk */
+	qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+	if (IS_ERR(qclk->ce_bus_clk)) {
+		rc = PTR_ERR(qclk->ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		clk_put(qclk->ce_clk);
+		return -EIO;
+	}
+
+	return rc;
+}
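The handles obtained above are what __qseecom_enable_clk()/__qseecom_disable_clk() (defined earlier in this file) prepare and enable. A hedged sketch of the usual prepare/enable pairing for these four handles, assuming the standard clk API and that no_clock_support is false; the function name is hypothetical:

/* Sketch of the usual clk pairing for the handles fetched above. */
static int example_enable_ce_clks(struct qseecom_clk *qclk)
{
	int rc;

	if (qclk->ce_core_src_clk) {
		rc = clk_prepare_enable(qclk->ce_core_src_clk);
		if (rc)
			return rc;
	}
	rc = clk_prepare_enable(qclk->ce_core_clk);
	if (rc)
		goto err_src;
	rc = clk_prepare_enable(qclk->ce_clk);
	if (rc)
		goto err_core;
	rc = clk_prepare_enable(qclk->ce_bus_clk);
	if (rc)
		goto err_iface;
	return 0;

err_iface:
	clk_disable_unprepare(qclk->ce_clk);
err_core:
	clk_disable_unprepare(qclk->ce_core_clk);
err_src:
	if (qclk->ce_core_src_clk)
		clk_disable_unprepare(qclk->ce_core_src_clk);
	return rc;
}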
+
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->ce_clk != NULL) {
+		clk_put(qclk->ce_clk);
+		qclk->ce_clk = NULL;
+	}
+	if (qclk->ce_core_clk != NULL) {
+		clk_put(qclk->ce_core_clk);
+		qclk->ce_core_clk = NULL;
+	}
+	if (qclk->ce_bus_clk != NULL) {
+		clk_put(qclk->ce_bus_clk);
+		qclk->ce_bus_clk = NULL;
+	}
+	if (qclk->ce_core_src_clk != NULL) {
+		clk_put(qclk->ce_core_src_clk);
+		qclk->ce_core_src_clk = NULL;
+	}
+	qclk->instance = CLK_INVALID;
+}
+
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t hlos_num_ce_hw_instances;
+	uint32_t disk_encrypt_pipe;
+	uint32_t file_encrypt_pipe;
+	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
+	int i;
+	const int *tbl;
+	int size;
+	int entry;
+	struct qseecom_crypto_info *pfde_tbl = NULL;
+	struct qseecom_crypto_info *p;
+	int tbl_size;
+	int j;
+	bool old_db = true;
+	struct qseecom_ce_info_use *pce_info_use;
+	uint32_t *unit_tbl = NULL;
+	int total_units = 0;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,qsee-ce-hw-instance",
+				&qseecom.ce_info.qsee_ce_hw_instance)) {
+		pr_err("Fail to get qsee ce hw instance information.\n");
+		rc = -EINVAL;
+		goto out;
+	} else {
+		pr_debug("qsee-ce-hw-instance=0x%x\n",
+			qseecom.ce_info.qsee_ce_hw_instance);
+	}
+
+	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-fde");
+	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-pfe");
+
+	if (!qseecom.support_pfe && !qseecom.support_fde) {
+		pr_warn("Device does not support PFE/FDE\n");
+		goto out;
+	}
+
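+	/*
+	 * Descriptive note (added for clarity): newer device trees describe the
+	 * FDE/PFE crypto engines as arrays of struct qseecom_crypto_info
+	 * entries (a unit number plus the CE instance and pipe pair it uses)
+	 * in the "qcom,full-disk-encrypt-info" and "qcom,per-file-encrypt-info"
+	 * properties. Older device trees take the old_db fallback further
+	 * below, which uses the single "qcom,disk-encrypt-pipe-pair" /
+	 * "qcom,file-encrypt-pipe-pair" values.
+	 */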
+	if (qseecom.support_fde)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof
+				(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+
+		if (!pfde_tbl || !unit_tbl) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read full-disk-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_fde = total_units;
+		pce_info_use = qseecom.ce_info.fde = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (qseecom.support_pfe)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof
+				(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+		if (!pfde_tbl || !unit_tbl) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read per-file-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_pfe = total_units;
+		pce_info_use = qseecom.ce_info.pfe = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (!old_db)
+		goto out1;
+
+	if (of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-multiple-ce-hw-instance")) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,hlos-num-ce-hw-instances",
+				&hlos_num_ce_hw_instances)) {
+			pr_err("Fail: get hlos number of ce hw instance\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		hlos_num_ce_hw_instances = 1;
+	}
+
+	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+			MAX_CE_PIPE_PAIR_PER_UNIT);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+			hlos_num_ce_hw_instances)) {
+		pr_err("Fail: get hlos ce hw instance info\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (qseecom.support_fde) {
+		pce_info_use = qseecom.ce_info.fde =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+		pce_info_use->ce_pipe_entry = NULL;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,disk-encrypt-pipe-pair",
+				&disk_encrypt_pipe)) {
+			pr_err("Fail to get FDE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
+				disk_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+				hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support FDE\n");
+		disk_encrypt_pipe = 0xff;
+	}
+	if (qseecom.support_pfe) {
+		pce_info_use = qseecom.ce_info.pfe =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+		pce_info_use->ce_pipe_entry = NULL;
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,file-encrypt-pipe-pair",
+				&file_encrypt_pipe)) {
+			pr_err("Fail to get PFE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("file-encrypt-pipe-pair=0x%x\n",
+				file_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+						hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = file_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support PFE\n");
+		file_encrypt_pipe = 0xff;
+	}
+
+out1:
+	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+	if (rc) {
+		if (qseecom.ce_info.fde) {
+			pce_info_use = qseecom.ce_info.fde;
+			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.fde);
+		qseecom.ce_info.fde = NULL;
+		if (qseecom.ce_info.pfe) {
+			pce_info_use = qseecom.ce_info.pfe;
+			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.pfe);
+		qseecom.ce_info.pfe = NULL;
+	}
+	kfree(unit_tbl);
+	kfree(pfde_tbl);
+	return rc;
+}
+
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	for (i = 0; i < total; i++) {
+		if (!p->alloc)
+			pce_info_use = p;
+		else if (!memcmp(p->handle, pinfo->handle,
+						MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+
+	if (pce_info_use == NULL)
+		return -EBUSY;
+
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (!pce_info_use->alloc) {
+		pce_info_use->alloc = true;
+		memcpy(pce_info_use->handle,
+			pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+	}
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	struct qseecom_ce_info_use *p;
+	int total = 0;
+	int i;
+	bool found = false;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc &&
+			!memcmp(p->handle, pinfo->handle,
+					MAX_CE_INFO_HANDLE_SIZE)) {
+			memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+			p->alloc = false;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+	pinfo->num_ce_pipe_entries  = 0;
+	for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	for (i = 0; i < total; i++) {
+
+		if (p->alloc && !memcmp(p->handle,
+				pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use)
+		goto out;
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+out:
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * Check the whitelist feature; if the TZ feature version is < 1.0.0,
+ * the whitelist feature is not supported.
+ */
+#define GET_FEAT_VERSION_CMD	3
+static int qseecom_check_whitelist_feature(void)
+{
+	struct qseecom_scm_desc desc = {0};
+	int version = 0;
+	int ret = 0;
+
+	desc.args[0] = FEATURE_ID_WHITELIST;
+	desc.arginfo = SCM_ARGS(1);
+	mutex_lock(&app_access_lock);
+	ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO,
+		GET_FEAT_VERSION_CMD), &desc);
+	mutex_unlock(&app_access_lock);
+	if (!ret)
+		version = desc.ret[0];
+
+	return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+}
+
+static int qseecom_init_clk(void)
+{
+	int rc;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	rc = __qseecom_init_clk(CLK_QSEE);
+	if (rc)
+		return rc;
+
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+			(qseecom.support_pfe || qseecom.support_fde)) {
+		rc = __qseecom_init_clk(CLK_CE_DRV);
+		if (rc) {
+			__qseecom_deinit_clk(CLK_QSEE);
+			return rc;
+		}
+	} else {
+		qseecom.ce_drv.ce_core_clk = qseecom.qsee.ce_core_clk;
+		qseecom.ce_drv.ce_clk = qseecom.qsee.ce_clk;
+		qseecom.ce_drv.ce_core_src_clk = qseecom.qsee.ce_core_src_clk;
+		qseecom.ce_drv.ce_bus_clk = qseecom.qsee.ce_bus_clk;
+	}
+
+	return rc;
+}
+
+static void qseecom_deinit_clk(void)
+{
+	if (qseecom.no_clock_support)
+		return;
+	__qseecom_deinit_clk(CLK_QSEE);
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+		(qseecom.support_pfe || qseecom.support_fde))
+		__qseecom_deinit_clk(CLK_CE_DRV);
+}
+
+static int qseecom_init_bus(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (!qseecom.support_bus_scaling)
+		return 0;
+
+	if (qseecom.no_clock_support) {
+		pr_err("Cannot support bus scaling if no clock support\n");
+		return -EINVAL;
+	}
+
+	timer_setup(&(qseecom.bw_scale_down_timer),
+			qseecom_scale_bus_bandwidth_timer_callback, 0);
+	INIT_WORK(&qseecom.bw_inactive_req_ws,
+				qseecom_bw_inactive_req_work);
+	qseecom.timer_running = false;
+	qseecom.icc_path = of_icc_get(&pdev->dev, "data_path");
+	if (IS_ERR(qseecom.icc_path)) {
+		ret = PTR_ERR(qseecom.icc_path);
+		if (ret != -EPROBE_DEFER)
+			pr_err("Unable to get Interconnect path\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void qseecom_deinit_bus(void)
+{
+	if (!qseecom.support_bus_scaling || qseecom.no_clock_support)
+		return;
+	qseecom_bus_scale_update_request(qseecom.qsee_perf_client, 0);
+	icc_put(qseecom.icc_path);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+	del_timer_sync(&qseecom.bw_scale_down_timer);
+}
+
+static int qseecom_send_app_region(struct platform_device *pdev)
+{
+	struct resource *resource = NULL;
+	struct qsee_apps_region_info_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	int rc = 0;
+
+	if (qseecom.qsee_version < QSEE_VERSION_02 ||
+		qseecom.is_apps_region_protected ||
+		qseecom.appsbl_qseecom_support)
+		return 0;
+
+	resource = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "secapp-region");
+	if (!resource) {
+		pr_err("Fail to get secure app region info\n");
+		return -ENOMEM;
+	}
+
+	req_64bit.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
+	req_64bit.addr = resource->start;
+	req_64bit.size = resource_size(resource);
+	cmd_buf = (void *)&req_64bit;
+	cmd_len = sizeof(struct qsee_apps_region_info_64bit_ireq);
+	pr_warn("secure app region addr=0x%llx size=0x%x\n",
+			req_64bit.addr, req_64bit.size);
+
+	rc = __qseecom_enable_clk(CLK_QSEE);
+	if (rc) {
+		pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+		return rc;
+	}
+	mutex_lock(&app_access_lock);
+	rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
+	__qseecom_disable_clk(CLK_QSEE);
+	if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+		pr_err("send secapp reg fail %d resp.res %d\n",
+					rc, resp.result);
+		return -EINVAL;
+	}
+	return rc;
+}
+
+static void qseecom_release_ce_data(void)
+{
+	int i;
+	struct qseecom_ce_info_use *pce_info_use = NULL;
+
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			kfree_sensitive(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.fde);
+	}
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			kfree_sensitive(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.pfe);
+	}
+}
+
+static int qseecom_reboot_worker(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct qseecom_registered_listener_list *entry;
+
+	/* Mark all the listeners as aborted, since the system is going
+	 * for a reboot and every pending listener request should
+	 * be aborted.
+	 */
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		entry->abort = 1;
+	}
+
+	/* stop CA thread waiting for listener response */
+	wake_up_interruptible_all(&qseecom.send_resp_wq);
+
+	/* Assumption: the system is going for a reboot, so every registered
+	 * userspace listener waiting interruptibly on an event will be
+	 * interrupted, as the TASK_INTERRUPTIBLE flag is set for them.
+	 */
+
+	return 0;
+}
+static int qseecom_register_reboot_notifier(void)
+{
+	int rc = 0;
+
+	/* Register a reboot notifier for resource cleanup at reboot.
+	 * The current implementation covers the listener use case;
+	 * it can be extended to apps as well if any corner-case
+	 * issue is found.
+	 */
+
+	qseecom.reboot_nb.notifier_call = qseecom_reboot_worker;
+	rc = register_reboot_notifier(&(qseecom.reboot_nb));
+	if (rc)
+		pr_err("failed to register reboot notifier\n");
+	return rc;
+}
+
+static int qseecom_init_dev(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	rc = alloc_chrdev_region(&qseecom.qseecom_device_no,
+					0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	qseecom.driver_class = class_create(QSEECOM_DEV);
+#else
+	qseecom.driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+#endif
+	if (IS_ERR(qseecom.driver_class)) {
+		rc = PTR_ERR(qseecom.driver_class);
+		pr_err("class_create failed %x\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+	qseecom.pdev = device_create(qseecom.driver_class, NULL,
+			qseecom.qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(qseecom.pdev)) {
+		rc = PTR_ERR(qseecom.pdev);
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+	cdev_init(&qseecom.cdev, &qseecom_fops);
+	qseecom.cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom.cdev,
+			MKDEV(MAJOR(qseecom.qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+	qseecom.dev = &pdev->dev;
+	rc = dma_set_mask(qseecom.dev, DMA_BIT_MASK(64));
+	if (rc) {
+		pr_err("qseecom failed to set dma mask %d\n", rc);
+		goto exit_del_cdev;
+	}
+	if (!qseecom.dev->dma_parms) {
+		qseecom.dev->dma_parms =
+			kzalloc(sizeof(*qseecom.dev->dma_parms), GFP_KERNEL);
+		if (!qseecom.dev->dma_parms) {
+			rc = -ENOMEM;
+			goto exit_del_cdev;
+		}
+	}
+	dma_set_max_seg_size(qseecom.dev, DMA_BIT_MASK(32));
+	rc = of_reserved_mem_device_init_by_idx(&pdev->dev,
+					(&pdev->dev)->of_node, 0);
+	if (rc) {
+		pr_err("Failed to initialize reserved mem, ret %d\n", rc);
+		goto exit_del_cdev;
+	}
+
+	rc = qseecom_register_reboot_notifier();
+	if (rc) {
+		pr_err("failed in registering reboot notifier %d\n", rc);
+		/* Exit even if notifier registration fails.
+		 * Although that is not a functional failure from the qseecom
+		 * driver's perspective, this registration failure
+		 * will cause more complex issues at the time of
+		 * reboot, or possibly halt the reboot.
+		 */
+		goto exit_del_cdev;
+	}
+
+	return 0;
+
+exit_del_cdev:
+	cdev_del(&qseecom.cdev);
+exit_destroy_device:
+	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
+exit_destroy_class:
+	class_destroy(qseecom.driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qseecom.qseecom_device_no, 1);
+
+	return rc;
+}
+
+static void qseecom_deinit_dev(void)
+{
+	kfree(qseecom.dev->dma_parms);
+	qseecom.dev->dma_parms = NULL;
+	unregister_reboot_notifier(&(qseecom.reboot_nb));
+	cdev_del(&qseecom.cdev);
+	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
+	class_destroy(qseecom.driver_class);
+	unregister_chrdev_region(qseecom.qseecom_device_no, 1);
+}
+
+static int qseecom_init_control(void)
+{
+	uint32_t feature = 10;
+	struct qseecom_command_scm_resp resp;
+	int rc = 0;
+
+	qseecom.qsee_version = QSEEE_VERSION_00;
+	mutex_lock(&app_access_lock);
+	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+		&resp, sizeof(resp));
+	mutex_unlock(&app_access_lock);
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		return rc;
+	}
+	qseecom.qsee_version = resp.result;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	init_waitqueue_head(&qseecom.app_block_wq);
+	qseecom.whitelist_support = true;
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
+	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+	spin_lock_init(&qseecom.registered_kclient_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
+	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
+	INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
+	init_waitqueue_head(&qseecom.unload_app_kthread_wq);
+	qseecom.send_resp_flag = 0;
+	qseecom.qseos_version = QSEOS_VERSION_14;
+	qseecom.commonlib_loaded = false;
+	qseecom.commonlib64_loaded = false;
+	qseecom.commonlib_loaded_by_hostvm = false;
+	qseecom.whitelist_support = qseecom_check_whitelist_feature();
+
+	return rc;
+}
+
+static int qseecom_parse_dt(struct platform_device *pdev)
+{
+	if (!pdev->dev.of_node) {
+		pr_err("NULL of_node\n");
+		return -ENODEV;
+	}
+	qseecom.pdev->of_node = pdev->dev.of_node;
+	qseecom.support_bus_scaling =
+		of_property_read_bool((&pdev->dev)->of_node,
+					"qcom,support-bus-scaling");
+	qseecom.appsbl_qseecom_support =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,appsbl-qseecom-support");
+	qseecom.commonlib64_loaded =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,commonlib64-loaded-by-uefi");
+	qseecom.commonlib_loaded_by_hostvm =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,commonlib-loaded-by-hostvm");
+	qseecom.fde_key_size =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,fde-key-size");
+	qseecom.no_clock_support =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,no-clock-support");
+	qseecom.enable_key_wrap_in_ks =
+			of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,enable-key-wrap-in-ks");
+	if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,qsee-reentrancy-support",
+			&qseecom.qsee_reentrancy_support)) {
+		pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+		qseecom.qsee_reentrancy_support = 0;
+	}
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,ce-opp-freq", &qseecom.ce_opp_freq_hz)) {
+		pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
+		qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+	}
+
+	/*
+	 * By default, appsbl only loads cmnlib. If the OEM changes appsbl to
+	 * load cmnlib64 too while the cmnlib64 image is not present in
+	 * non_hlos.bin, please add "qseecom.commonlib64_loaded = true" here too.
+	 */
+	if (qseecom.is_apps_region_protected ||
+				qseecom.appsbl_qseecom_support)
+		qseecom.commonlib_loaded = true;
+
+	return 0;
+}
+
+static int qseecom_create_kthreads(void)
+{
+	int rc = 0;
+
+	qseecom.unregister_lsnr_kthread_task = kthread_run(
+			__qseecom_unregister_listener_kthread_func,
+			NULL, "qseecom-unreg-lsnr");
+	if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
+		rc = PTR_ERR(qseecom.unregister_lsnr_kthread_task);
+		pr_err("fail to create kthread to unreg lsnr, rc = %x\n", rc);
+		return rc;
+	}
+	atomic_set(&qseecom.unregister_lsnr_kthread_state,
+					LSNR_UNREG_KT_SLEEP);
+
+	/* Create a kthread to process pending TA unloading tasks */
+	qseecom.unload_app_kthread_task = kthread_run(
+			__qseecom_unload_app_kthread_func,
+			NULL, "qseecom-unload-ta");
+	if (IS_ERR(qseecom.unload_app_kthread_task)) {
+		rc = PTR_ERR(qseecom.unload_app_kthread_task);
+		pr_err("failed to create kthread to unload ta, rc = %x\n", rc);
+		kthread_stop(qseecom.unregister_lsnr_kthread_task);
+		return rc;
+	}
+	atomic_set(&qseecom.unload_app_kthread_state,
+					UNLOAD_APP_KT_SLEEP);
+	return 0;
+}
+
+static int qseecom_register_heap_shmbridge(struct platform_device *pdev,
+					   char *heap_mem_region_name,
+					   uint64_t *handle)
+{
+	phys_addr_t heap_pa = 0;
+	size_t heap_size = 0;
+	struct device_node *node = NULL;
+	struct reserved_mem *rmem = NULL;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+
+	node = of_parse_phandle(pdev->dev.of_node, heap_mem_region_name, 0);
+	if (!node) {
+		pr_err("unable to parse memory-region of heap %s\n", heap_mem_region_name);
+		return -EINVAL;
+	}
+	rmem = of_reserved_mem_lookup(node);
+	if (!rmem) {
+		pr_err("unable to acquire memory-region of heap %s\n", heap_mem_region_name);
+		return -EINVAL;
+	}
+
+	heap_pa = rmem->base;
+	heap_size = (size_t)rmem->size;
+
+	pr_debug("get heap %s info: shmbridge created\n", heap_mem_region_name);
+	return qtee_shmbridge_register(heap_pa,
+			heap_size, ns_vmids, ns_vm_perms, 1,
+			PERM_READ | PERM_WRITE, handle);
+}
+
+static int qseecom_register_shmbridge(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (!qtee_shmbridge_is_enabled())
+		return 0;
+	ret = qseecom_register_heap_shmbridge(pdev, "qseecom_ta_mem",
+					&qseecom.ta_bridge_handle);
+	if (ret)
+		return ret;
+	ret = qseecom_register_heap_shmbridge(pdev, "qseecom_mem",
+					&qseecom.qseecom_bridge_handle);
+	if (ret) {
+		qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+		return ret;
+	}
+
+	/* no-user-contig-mem is present in dtsi if user_contig_region is not needed */
+	qseecom.no_user_contig_mem_support = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,no-user-contig-mem-support");
+	if (!qseecom.no_user_contig_mem_support) {
+		ret = qseecom_register_heap_shmbridge(pdev, "user_contig_mem",
+						&qseecom.user_contig_bridge_handle);
+		if (ret) {
+			qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+			qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static void qseecom_deregister_shmbridge(void)
+{
+	qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+}
+
+static int qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = qseecom_register_shmbridge(pdev);
+	if (rc)
+		return rc;
+	rc = qseecom_init_dev(pdev);
+	if (rc)
+		goto exit_unregister_bridge;
+	rc = qseecom_init_control();
+	if (rc)
+		goto exit_deinit_dev;
+	rc = qseecom_parse_dt(pdev);
+	if (rc)
+		goto exit_deinit_dev;
+	rc = qseecom_retrieve_ce_data(pdev);
+	if (rc)
+		goto exit_deinit_dev;
+	rc = qseecom_init_clk();
+	if (rc)
+		goto exit_release_ce_data;
+	rc = qseecom_init_bus(pdev);
+	if (rc)
+		goto exit_deinit_clock;
+	rc = qseecom_send_app_region(pdev);
+	if (rc)
+		goto exit_deinit_bus;
+	rc = qseecom_create_kthreads();
+	if (rc)
+		goto exit_deinit_bus;
+
+#if IS_ENABLED(CONFIG_QSEECOM) && IS_ENABLED(CONFIG_QSEECOM_PROXY)
+	/*
+	 * If the API fails to get the func ops, print the error and continue;
+	 * do not treat it as fatal.
+	 */
+	rc = get_qseecom_kernel_fun_ops();
+	if (rc)
+		pr_err("failed to provide qseecom ops %d\n", rc);
+#endif
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return 0;
+
+exit_deinit_bus:
+	qseecom_deinit_bus();
+exit_deinit_clock:
+	qseecom_deinit_clk();
+exit_release_ce_data:
+	qseecom_release_ce_data();
+exit_deinit_dev:
+	qseecom_deinit_dev();
+exit_unregister_bridge:
+	qseecom_deregister_shmbridge();
+	return rc;
+}
+
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
+	unsigned long flags = 0;
+	int ret = 0;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+	list_for_each_entry_safe(kclient, kclient_tmp,
+		&qseecom.registered_kclient_list_head, list) {
+
+		/* Break the loop if client handle is NULL */
+		if (!kclient->handle) {
+			list_del(&kclient->list);
+			kfree_sensitive(kclient);
+			break;
+		}
+
+		list_del(&kclient->list);
+		mutex_lock(&app_access_lock);
+		ret = qseecom_unload_app(kclient->handle->dev, false);
+		mutex_unlock(&app_access_lock);
+		if (!ret) {
+			kfree_sensitive(kclient->handle->dev);
+			kfree_sensitive(kclient->handle);
+			kfree_sensitive(kclient);
+		}
+	}
+
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	if (qseecom.qseos_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
+
+	qseecom_deregister_shmbridge();
+	kthread_stop(qseecom.unload_app_kthread_task);
+	kthread_stop(qseecom.unregister_lsnr_kthread_task);
+	qseecom_deinit_bus();
+	qseecom_deinit_clk();
+	qseecom_release_ce_data();
+	qseecom_deinit_dev();
+	return ret;
+}
+
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+	if (qseecom.no_clock_support)
+		return 0;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.current_mode != INACTIVE) {
+		ret = qseecom_bus_scale_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
+		if (ret)
+			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+	return 0;
+}
+
+static int qseecom_resume(struct platform_device *pdev)
+{
+	int mode = 0;
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qseecom.no_clock_support)
+		goto exit;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+	if (qseecom.cumulative_mode >= HIGH)
+		mode = HIGH;
+	else
+		mode = qseecom.cumulative_mode;
+
+	if (qseecom.cumulative_mode != INACTIVE) {
+		ret = qseecom_bus_scale_update_request(
+			qseecom.qsee_perf_client, mode);
+		if (ret)
+			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_core_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_core_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE core clk\n");
+				qclk->clk_access_cnt = 0;
+				goto err;
+			}
+		}
+		if (qclk->ce_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE iface clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_clk_err;
+			}
+		}
+		if (qclk->ce_bus_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_bus_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE bus clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_bus_clk_err;
+			}
+		}
+	}
+
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
+		qseecom.timer_running = true;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	goto exit;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	ret = -EIO;
+exit:
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+	{
+		.compatible = "qcom,qseecom",
+	},
+	{}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.suspend = qseecom_suspend,
+	.resume = qseecom_resume,
+	.driver = {
+		.name = "qseecom",
+		.of_match_table = qseecom_match,
+	},
+};
+
+static int qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void qseecom_exit(void)
+{
+	platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+MODULE_IMPORT_NS(DMA_BUF);
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);

+ 112 - 0
qcom/opensource/securemsm-kernel/securemsm_kernel.bzl

@@ -0,0 +1,112 @@
+load(
+    "//build/kernel/kleaf:kernel.bzl",
+    "ddk_module",
+    "kernel_modules_install",
+)
+load(
+    ":securemsm_modules.bzl",
+    "securemsm_modules",
+    "securemsm_modules_by_config",
+)
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+
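+# Expands the formatting codes used in module source and dependency strings:
+# "%b" becomes the kernel build name "{target}_{variant}" and "%t" becomes the
+# target name.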
+def _replace_formatting_codes(target, variant, s):
+    kernel_build = "{}_{}".format(target, variant)
+
+    return s.replace("%b", kernel_build).replace("%t", target)
+
+def _console_print(target, variant, module, message):
+    if module:
+        print("{}: {}: securemsm-kernel: {}: {}".format(target, variant, module, message))
+    else:
+        print("{}: {}: securemsm-kernel: {} ".format(target, variant, message))
+
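+# Collects the config options to declare for this build: the caller-supplied
+# extra_options, the config_option of every registered securemsm module, and
+# the target-wide config option. Options passed in extra_options that are
+# already implied by a listed module or by the target are reported as
+# redundant.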
+def _get_options(target, variant, target_config_option, modules, extra_options):
+    all_options = {option: True for option in extra_options}
+
+    redundant_options = []
+
+    for option in securemsm_modules_by_config:
+        module_name = securemsm_modules_by_config[option]
+
+        if option in all_options:
+            if module_name in modules:
+                redundant_options.append(option)
+            else:
+                _console_print(target, variant, None, 'WARNING: Config option "{}" corresponds to securemsm module {}, but this module is not listed in module list!'.format(option, module_name))
+        else:
+            all_options[option] = True
+
+    if target_config_option in all_options:
+        redundant_options.append(target_config_option)
+    else:
+        all_options[target_config_option] = True
+
+    if redundant_options:
+        _console_print(target, variant, None, "INFO: The following options are already declared either by a module or the target, no need to redeclare: \n{}".format("\n".join(redundant_options)))
+
+    return all_options
+
+def _get_module_srcs(target, variant, module, options):
+    srcs = [] + module["default_srcs"] + module["srcs"]
+    module_path = "{}/".format(module["path"]) if module["path"] else ""
+
+    for option in module["config_srcs"]:
+        srcs.extend(module["config_srcs"][option].get(option in options, []))
+
+    globbed_srcs = native.glob(["{}{}".format(module_path, _replace_formatting_codes(target, variant, src)) for src in srcs])
+
+    if not globbed_srcs:
+        _console_print(target, variant, module["name"], "WARNING: Module has no sources attached!")
+
+    return globbed_srcs
+
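+# Defines one ddk_module rule per requested securemsm module for the given
+# target/variant kernel build, copies the resulting .ko files to the dist
+# directory, and creates a kernel_modules_install rule covering all of them.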
+def define_target_variant_modules(target, variant, modules, extra_options = [], config_option = None):
+    kernel_build_variant = "{}_{}".format(target, variant)
+    options = _get_options(target, variant, config_option, modules, extra_options)
+    module_rules = []
+    target_local_defines = []
+    modules = [securemsm_modules[module_name] for module_name in modules]
+    tv = "{}_{}".format(target, variant)
+
+    target_local_defines = ["SMCINVOKE_TRACE_INCLUDE_PATH=../../../{}/smcinvoke".format(native.package_name())]
+
+    for config in extra_options:
+        target_local_defines.append(config)
+    for module in modules:
+        rule_name = "{}_{}".format(kernel_build_variant, module["name"])
+        module_srcs = _get_module_srcs(target, variant, module, options)
+
+        ddk_module(
+            name = rule_name,
+            kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+            srcs = module_srcs,
+            out = "{}.ko".format(module["name"]),
+            deps = ["//msm-kernel:all_headers"] + [_replace_formatting_codes(target, variant, dep) for dep in module["deps"]],
+            hdrs = module["hdrs"],
+            local_defines = target_local_defines,
+            copts = module["copts"],
+        )
+        module_rules.append(rule_name)
+
+    copy_to_dist_dir(
+        name = "{}_securemsm-kernel_dist".format(kernel_build_variant),
+        data = module_rules,
+        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )
+
+    kernel_modules_install(
+        name = "{}_modules_install".format(kernel_build_variant),
+        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+        kernel_modules = module_rules,
+    )
+
+def define_consolidate_gki_modules(target, modules, extra_options = [], config_option = None):
+    define_target_variant_modules(target, "consolidate", modules, extra_options, config_option)
+    define_target_variant_modules(target, "gki", modules, extra_options, config_option)
+    define_target_variant_modules(target, "perf", modules, extra_options, config_option)
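+
+# Illustrative (hypothetical) call from a target build file; the target name
+# below is made up, while the module names come from securemsm_modules.bzl:
+#
+# define_consolidate_gki_modules(
+#     target = "exampletarget",
+#     modules = [
+#         "smcinvoke_dlkm",
+#         "tz_log_dlkm",
+#         "qseecom_dlkm",
+#     ],
+# )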

+ 71 - 0
qcom/opensource/securemsm-kernel/securemsm_kernel_product_board.mk

@@ -0,0 +1,71 @@
+#Build ssg kernel driver
+
+ENABLE_SECUREMSM_DLKM := true
+ENABLE_SECUREMSM_QTEE_DLKM := true
+
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+  ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), false)
+      ENABLE_SECUREMSM_DLKM := false
+  endif
+  ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE), false)
+      ENABLE_SECUREMSM_QTEE_DLKM := false
+  endif
+endif
+
+ifeq ($(ENABLE_SECUREMSM_DLKM), true)
+  ENABLE_QCRYPTO_DLKM := true
+  ENABLE_HDCP_QSEECOM_DLKM := true
+  ENABLE_QRNG_DLKM := true
+  ifeq ($(TARGET_USES_SMMU_PROXY), true)
+    ENABLE_SMMU_PROXY := true
+  endif #TARGET_USES_SMMU_PROXY
+endif #ENABLE_SECUREMSM_DLKM
+
+ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := true
+  #Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
+  ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
+    ENABLE_QSEECOM_DLKM := true
+  endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
+endif #ENABLE_SECUREMSM_QTEE_DLKM
+
+ifeq ($(TARGET_USES_GY), true)
+  ENABLE_QCRYPTO_DLKM := false
+  ENABLE_HDCP_QSEECOM_DLKM := false
+  ENABLE_QRNG_DLKM := false
+  ENABLE_SMMU_PROXY := false
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := false
+  ENABLE_QSEECOM_DLKM := false
+endif #TARGET_USES_GY
+
+ifeq ($(ENABLE_QCRYPTO_DLKM), true)
+PRODUCT_PACKAGES += qcedev-mod_dlkm.ko
+PRODUCT_PACKAGES += qce50_dlkm.ko
+PRODUCT_PACKAGES += qcrypto-msm_dlkm.ko
+endif #ENABLE_QCRYPTO_DLKM
+
+ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
+PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko
+endif #ENABLE_HDCP_QSEECOM_DLKM
+
+ifeq ($(ENABLE_QRNG_DLKM), true)
+PRODUCT_PACKAGES += qrng_dlkm.ko
+endif #ENABLE_QRNG_DLKM
+
+ifeq ($(ENABLE_SMMU_PROXY), true)
+PRODUCT_PACKAGES += smmu_proxy_dlkm.ko
+endif #ENABLE_SMMU_PROXY
+
+ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
+PRODUCT_PACKAGES += smcinvoke_dlkm.ko
+endif #ENABLE_SMCINVOKE_DLKM
+
+ifeq ($(ENABLE_TZLOG_DLKM), true)
+PRODUCT_PACKAGES += tz_log_dlkm.ko
+endif #ENABLE_TZLOG_DLKM
+
+ifeq ($(ENABLE_QSEECOM_DLKM), true)
+PRODUCT_PACKAGES += qseecom_dlkm.ko
+endif #ENABLE_QSEECOM_DLKM

+ 75 - 0
qcom/opensource/securemsm-kernel/securemsm_kernel_vendor_board.mk

@@ -0,0 +1,75 @@
+ENABLE_SECUREMSM_DLKM := true
+ENABLE_SECUREMSM_QTEE_DLKM := true
+
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+  ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), false)
+      ENABLE_SECUREMSM_DLKM := false
+  endif
+  ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE), false)
+      ENABLE_SECUREMSM_QTEE_DLKM := false
+  endif
+endif
+
+ifeq ($(ENABLE_SECUREMSM_DLKM), true)
+  ENABLE_QCRYPTO_DLKM := true
+  ENABLE_HDCP_QSEECOM_DLKM := true
+  ENABLE_QRNG_DLKM := true
+  ifeq ($(TARGET_USES_SMMU_PROXY), true)
+    ENABLE_SMMU_PROXY := true
+  endif #TARGET_USES_SMMU_PROXY
+endif #ENABLE_SECUREMSM_DLKM
+
+ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := true
+  #Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
+  ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
+    ENABLE_QSEECOM_DLKM := true
+  endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
+endif #ENABLE_SECUREMSM_QTEE_DLKM
+
+ifeq ($(TARGET_USES_GY), true)
+  ENABLE_QCRYPTO_DLKM := false
+  ENABLE_HDCP_QSEECOM_DLKM := false
+  ENABLE_QRNG_DLKM := false
+  ENABLE_SMMU_PROXY := false
+  ENABLE_SMCINVOKE_DLKM := true
+  ENABLE_TZLOG_DLKM := false
+  ENABLE_QSEECOM_DLKM := false
+endif #TARGET_USES_GY
+
+ifeq ($(ENABLE_QCRYPTO_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \
+      $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \
+      $(KERNEL_MODULES_OUT)/qce50_dlkm.ko
+endif #ENABLE_QCRYPTO_DLKM
+
+ifeq ($(ENABLE_QRNG_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qrng_dlkm.ko
+endif #ENABLE_QRNG_DLKM
+
+ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko
+endif #ENABLE_HDCP_QSEECOM_DLKM
+
+ifeq ($(ENABLE_SMMU_PROXY), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smmu_proxy_dlkm.ko
+endif #ENABLE_SMMU_PROXY
+
+ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
+BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
+endif #ENABLE_SMCINVOKE_DLKM
+
+ifeq ($(ENABLE_TZLOG_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES +=  $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko
+endif # ENABLE_TZLOG_DLKM
+
+ifeq ($(ENABLE_QSEECOM_DLKM), true)
+BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
+BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
+BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
+endif #ENABLE_QSEECOM_DLKM

+ 160 - 0
qcom/opensource/securemsm-kernel/securemsm_modules.bzl

@@ -0,0 +1,160 @@
+SMCINVOKE_PATH = "smcinvoke"
+QSEECOM_PATH = "qseecom"
+TZLOG_PATH = "tz_log"
+HDCP_PATH = "hdcp"
+QCEDEV_PATH = "crypto-qti"
+QRNG_PATH = "qrng"
+SMMU_PROXY_PATH = "smmu-proxy"
+
+# This dictionary holds all the securemsm-kernel modules registered by calling register_securemsm_module
+securemsm_modules = {}
+securemsm_modules_by_config = {}
+
+# Registers a securemsm module with the kernel build system.
+# name: The name of the module. The name of the file generated for this module will be {name}.ko.
+# path: The path that will be prepended to all sources listed for this module.
+# config_option: The config option that will get enabled if this module is enabled. Not all modules have this, and this is an optional parameter.
+# config_srcs: A dictionary of sources to be added to the module depending on whether a configuration option is enabled. The keys to the dictionary are
+# the names of config options, and the value depends on its type. If it is a list, it is simply the list of sources to be added to the module if the config
+# option is enabled. If the value is another dictionary, sources to be added when the config option is DISABLED can be specified with a list under the
+# False key (and sources for the enabled case under the True key).
+# default_srcs: A list of sources to be added to the module regardless of configuration options.
+# deps: A list of kernel_module or ddk_module rules that this module depends on.
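+#
+# Illustrative example (hypothetical module and file names), showing the
+# config_srcs form where the True/False keys select sources for the
+# enabled/disabled cases of a config option:
+#
+# register_securemsm_module(
+#     name = "example_dlkm",
+#     path = "example",
+#     config_option = "CONFIG_EXAMPLE_FEATURE",
+#     default_srcs = ["example_main.c"],
+#     config_srcs = {
+#         "CONFIG_EXAMPLE_FEATURE": {True: ["example_feature.c"], False: ["example_stub.c"]},
+#     },
+# )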
+
+def register_securemsm_module(name, path = None, config_option = None, default_srcs = [], config_srcs = {}, deps = [], srcs = [], copts = [], hdrs = []):
+    processed_config_srcs = {}
+    for config_src_name in config_srcs:
+        config_src = config_srcs[config_src_name]
+
+        if type(config_src) == "list":
+            processed_config_srcs[config_src_name] = {True: config_src}
+        else:
+            processed_config_srcs[config_src_name] = config_src
+
+    module = {
+        "name": name,
+        "path": path,
+        "default_srcs": default_srcs,
+        "config_srcs": processed_config_srcs,
+        "config_option": config_option,
+        "deps": deps,
+        "copts": copts,
+        "srcs": srcs,
+        "hdrs": hdrs,
+    }
+
+    securemsm_modules[name] = module
+
+    if config_option:
+        securemsm_modules_by_config[config_option] = name
+
+# ------------------------------------ SECUREMSM MODULE DEFINITIONS ---------------------------------
+register_securemsm_module(
+    name = "qseecom_dlkm",
+    path = QSEECOM_PATH,
+    default_srcs = [
+        "qseecom.c",
+        "ice.h",
+    ],
+    deps = [":qseecom_kernel_headers"],
+    #srcs = ["config/sec-kernel_defconfig_qseecom.h"],
+    #copts = ["-include", "config/sec-kernel_defconfig_qseecom.h"],
+)
+
+
+register_securemsm_module(
+    name = "smcinvoke_dlkm",
+    path = SMCINVOKE_PATH,
+    default_srcs = [
+        "smcinvoke.c",
+        "smcinvoke_kernel.c",
+        "trace_smcinvoke.h",
+        "IQSEEComCompat.h",
+        "smci_qseecomcompat.h",
+        "IQSEEComCompatAppLoader.h",
+        "smci_qseecomcompatapploader.h",
+    ],
+    deps = [":smcinvoke_kernel_headers", ":qseecom_kernel_headers", "%b_qseecom_dlkm"],
+    hdrs = [":smcinvoke_kernel_headers"],
+)
+
+register_securemsm_module(
+    name = "tz_log_dlkm",
+    path = TZLOG_PATH,
+    deps = [":qseecom_kernel_headers"],
+    default_srcs = ["tz_log.c"],
+)
+
+register_securemsm_module(
+    name = "hdcp_qseecom_dlkm",
+    path = HDCP_PATH,
+    default_srcs = [
+        "hdcp_qseecom.c",
+        "hdcp_qseecom.h",
+        "hdcp_main.c",
+        "smcinvoke_object.h",
+        "smci_object.h",
+        "hdcp_main.h",
+        "hdcp_smcinvoke.c",
+        "hdcp_smcinvoke.h",
+        "CAppClient.h",
+        "CAppLoader.h",
+        "IAppClient.h",
+        "smci_appclient.h",
+        "IAppController.h",
+        "smci_appcontroller.h",
+        "IAppLoader.h",
+        "smci_apploader.h",
+        "IClientEnv.h",
+        "smci_clientenv.h",
+        "IOpener.h",
+        "smci_opener.h",
+        "hdcp1.h",
+        "hdcp1_ops.h",
+        "hdcp2p2.h",
+    ],
+    deps = [":hdcp_qseecom_dlkm", "%b_smcinvoke_dlkm", "%b_qseecom_dlkm"],
+    srcs = ["config/sec-kernel_defconfig.h"],
+    copts = [
+        "-include",
+        "config/sec-kernel_defconfig.h",
+    ],
+)
+
+register_securemsm_module(
+    name = "qce50_dlkm",
+    path = QCEDEV_PATH,
+    default_srcs = ["qce50.c"],
+    deps = [":qcedev_local_headers"],
+)
+
+register_securemsm_module(
+    name = "qcedev-mod_dlkm",
+    path = QCEDEV_PATH,
+    default_srcs = [
+                "qcedev.c",
+                "qcedev_smmu.c"],
+    deps = [":qcedev_local_headers",
+            "%b_qce50_dlkm"],
+)
+
+register_securemsm_module(
+    name = "qrng_dlkm",
+    path = QRNG_PATH,
+    default_srcs = ["msm_rng.c"],
+    deps = [":qcedev_local_headers"],
+)
+
+register_securemsm_module(
+    name = "qcrypto-msm_dlkm",
+    path = QCEDEV_PATH,
+    default_srcs = ["qcrypto.c"],
+    deps = [":qcedev_local_headers",
+            "%b_qce50_dlkm"],
+)
+
+register_securemsm_module(
+    name = "smmu_proxy_dlkm",
+    path = SMMU_PROXY_PATH,
+    srcs = ["qti-smmu-proxy-pvm.c", "qti-smmu-proxy-common.c"],
+    deps = ["%b_smcinvoke_dlkm", ":smmu_proxy_headers"],
+)

+ 71 - 0
qcom/opensource/securemsm-kernel/smcinvoke/IQSEEComCompat.h

@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ */
+
+#include "smcinvoke_object.h"
+
+#define IQSEEComCompat_ERROR_APP_UNAVAILABLE INT32_C(10)
+#define IQSEEComCompat_OP_sendRequest 0
+#define IQSEEComCompat_OP_disconnect 1
+#define IQSEEComCompat_OP_unload 2
+
+
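+/*
+ * Descriptive note (added for clarity): each wrapper below packs its
+ * arguments into a union ObjectArg array in the order input buffers,
+ * output buffers, input objects, output objects, and encodes the count of
+ * each kind with ObjectCounts_pack() before calling Object_invoke().
+ */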
+static inline int32_t
+IQSEEComCompat_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IQSEEComCompat_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IQSEEComCompat_sendRequest(struct Object self,
+		const void *reqIn_ptr, size_t reqIn_len,
+		const void *rspIn_ptr, size_t rspIn_len,
+		void *reqOut_ptr, size_t reqOut_len, size_t *reqOut_lenout,
+		void *rspOut_ptr, size_t rspOut_len, size_t *rspOut_lenout,
+		const uint32_t *embeddedBufOffsets_ptr,
+		size_t embeddedBufOffsets_len, uint32_t is64_val,
+		struct Object smo1_val, struct Object smo2_val,
+		struct Object smo3_val, struct Object smo4_val)
+{
+	union ObjectArg a[10];
+	int32_t result;
+
+	a[0].bi = (struct ObjectBufIn) { reqIn_ptr, reqIn_len * 1 };
+	a[1].bi = (struct ObjectBufIn) { rspIn_ptr, rspIn_len * 1 };
+	a[4].b = (struct ObjectBuf) { reqOut_ptr, reqOut_len * 1 };
+	a[5].b = (struct ObjectBuf) { rspOut_ptr, rspOut_len * 1 };
+	a[2].bi = (struct ObjectBufIn) { embeddedBufOffsets_ptr,
+			embeddedBufOffsets_len * sizeof(uint32_t) };
+	a[3].b = (struct ObjectBuf) { &is64_val, sizeof(uint32_t) };
+	a[6].o = smo1_val;
+	a[7].o = smo2_val;
+	a[8].o = smo3_val;
+	a[9].o = smo4_val;
+
+	result = Object_invoke(self, IQSEEComCompat_OP_sendRequest, a,
+			ObjectCounts_pack(4, 2, 4, 0));
+
+	*reqOut_lenout = a[4].b.size / 1;
+	*rspOut_lenout = a[5].b.size / 1;
+
+	return result;
+}
+
+static inline int32_t
+IQSEEComCompat_disconnect(struct Object self)
+{
+	return Object_invoke(self, IQSEEComCompat_OP_disconnect, 0, 0);
+}
+
+static inline int32_t
+IQSEEComCompat_unload(struct Object self)
+{
+	return Object_invoke(self, IQSEEComCompat_OP_unload, 0, 0);
+}

+ 106 - 0
qcom/opensource/securemsm-kernel/smcinvoke/IQSEEComCompatAppLoader.h

@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "smcinvoke_object.h"
+
+#define IQSEEComCompatAppLoader_MAX_FILENAME_LEN UINT32_C(64)
+#define IQSEEComCompatAppLoader_ELFCLASS32 UINT32_C(1)
+#define IQSEEComCompatAppLoader_ELFCLASS64 UINT32_C(2)
+
+#define IQSEEComCompatAppLoader_ERROR_INVALID_BUFFER INT32_C(10)
+#define IQSEEComCompatAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
+#define IQSEEComCompatAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
+#define IQSEEComCompatAppLoader_ERROR_METADATA_INVALID INT32_C(13)
+#define IQSEEComCompatAppLoader_ERROR_MAX_NUM_APPS INT32_C(14)
+#define IQSEEComCompatAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15)
+#define IQSEEComCompatAppLoader_ERROR_ALREADY_LOADED INT32_C(16)
+#define IQSEEComCompatAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
+#define IQSEEComCompatAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
+#define IQSEEComCompatAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
+#define IQSEEComCompatAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
+#define IQSEEComCompatAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
+#define IQSEEComCompatAppLoader_ERROR_APP_BLACKLISTED INT32_C(22)
+#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(23)
+#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(24)
+#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(25)
+#define IQSEEComCompatAppLoader_ERROR_APP_ARCH_NOT_SUPPORTED INT32_C(26)
+
+#define IQSEEComCompatAppLoader_OP_loadFromRegion 0
+#define IQSEEComCompatAppLoader_OP_loadFromBuffer 1
+#define IQSEEComCompatAppLoader_OP_lookupTA 2
+
+
+static inline int32_t
+IQSEEComCompatAppLoader_release(struct Object self)
+{
+	return Object_invoke(self, Object_OP_release, 0, 0);
+}
+
+static inline int32_t
+IQSEEComCompatAppLoader_retain(struct Object self)
+{
+	return Object_invoke(self, Object_OP_retain, 0, 0);
+}
+
+static inline int32_t
+IQSEEComCompatAppLoader_loadFromRegion(struct Object self,
+			struct Object appElf_val, const void *filename_ptr,
+			size_t filename_len, struct Object *appCompat_ptr)
+{
+	union ObjectArg a[3];
+	int32_t result;
+
+	a[1].o = appElf_val;
+	a[0].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 };
+
+	result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromRegion, a,
+			ObjectCounts_pack(1, 0, 1, 1));
+
+	*appCompat_ptr = a[2].o;
+
+	return result;
+}
+
+static inline int32_t
+IQSEEComCompatAppLoader_loadFromBuffer(struct Object self,
+			const void *appElf_ptr, size_t appElf_len,
+			const void *filename_ptr, size_t filename_len,
+			void *distName_ptr, size_t distName_len,
+			size_t *distName_lenout, struct Object *appCompat_ptr)
+{
+	union ObjectArg a[4];
+	int32_t result;
+
+	a[0].bi = (struct ObjectBufIn) { appElf_ptr, appElf_len * 1 };
+	a[1].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 };
+	a[2].b = (struct ObjectBuf) { distName_ptr, distName_len * 1 };
+
+	result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromBuffer,
+			a, ObjectCounts_pack(2, 1, 0, 1));
+
+	*distName_lenout = a[2].b.size / 1;
+	*appCompat_ptr = a[3].o;
+
+	return result;
+}
+
+static inline int32_t
+IQSEEComCompatAppLoader_lookupTA(struct Object self, const void *appName_ptr,
+			size_t appName_len, struct Object *appCompat_ptr)
+{
+	union ObjectArg a[2];
+	int32_t result;
+
+	a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
+
+	result = Object_invoke(self, IQSEEComCompatAppLoader_OP_lookupTA,
+			a, ObjectCounts_pack(1, 0, 0, 1));
+
+	*appCompat_ptr = a[1].o;
+
+	return result;
+}
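+
+/*
+ * Illustrative usage sketch (not part of this interface header): a caller
+ * that already holds an app-loader object -- "loader" below is a
+ * hypothetical variable -- could look up an already-loaded TA by name.
+ * On failure, the IQSEEComCompatAppLoader_ERROR_* codes above apply.
+ *
+ *	struct Object app = Object_NULL;
+ *	const char name[] = "example_ta";	// hypothetical TA name
+ *	int32_t rc = IQSEEComCompatAppLoader_lookupTA(loader, name,
+ *			sizeof(name) - 1, &app);
+ *	if (rc == IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED)
+ *		;	// fall back to loadFromBuffer()/loadFromRegion()
+ */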
+

+ 64 - 0
qcom/opensource/securemsm-kernel/smcinvoke/smci_qseecomcompat.h

@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_QSEECOMCOMPAT_H
+#define __SMCI_QSEECOMCOMPAT_H
+
+#include "smci_object.h"
+#include "IQSEEComCompat.h"
+
+#define SMCI_QSEECOMCOMPAT_ERROR_APP_UNAVAILABLE INT32_C(10)
+#define SMCI_QSEECOMCOMPAT_OP_SENDREQUEST 0
+#define SMCI_QSEECOMCOMPAT_OP_DISCONNECT 1
+#define SMCI_QSEECOMCOMPAT_OP_UNLOAD 2
+
+
+static inline int32_t
+smci_qseecomcompat_release(struct smci_object self)
+{
+	return IQSEEComCompat_release(self);
+}
+
+static inline int32_t
+smci_qseecomcompat_retain(struct smci_object self)
+{
+	return IQSEEComCompat_retain(self);
+}
+
+static inline int32_t
+smci_qseecomcompat_sendrequest(struct smci_object self,
+		const void *req_in_ptr, size_t req_in_len,
+		const void *rsp_in_ptr, size_t rsp_in_len,
+		void *req_out_ptr, size_t req_out_len, size_t *req_out_lenout,
+		void *rsp_out_ptr, size_t rsp_out_len, size_t *rsp_out_lenout,
+		const uint32_t *embedded_buf_offsets_ptr,
+		size_t embedded_buf_offsets_len, uint32_t is64_val,
+		struct smci_object smo1_val, struct smci_object smo2_val,
+		struct smci_object smo3_val, struct smci_object smo4_val)
+{
+	return IQSEEComCompat_sendRequest(self,
+		req_in_ptr, req_in_len,
+		rsp_in_ptr, rsp_in_len,
+		req_out_ptr, req_out_len, req_out_lenout,
+		rsp_out_ptr, rsp_out_len, rsp_out_lenout,
+		embedded_buf_offsets_ptr,
+		embedded_buf_offsets_len, is64_val,
+		smo1_val, smo2_val,
+		smo3_val, smo4_val);
+}
+
+static inline int32_t
+smci_qseecomcompat_disconnect(struct smci_object self)
+{
+	return IQSEEComCompat_disconnect(self);
+}
+
+static inline int32_t
+smci_qseecomcompat_unload(struct smci_object self)
+{
+	return IQSEEComCompat_unload(self);
+}
+
+#endif /* __SMCI_QSEECOMCOMPAT_H */

+ 83 - 0
qcom/opensource/securemsm-kernel/smcinvoke/smci_qseecomcompatapploader.h

@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __SMCI_QSEECOMCOMPATAPPLOADER_H
+#define __SMCI_QSEECOMCOMPATAPPLOADER_H
+
+#include "smci_object.h"
+#include "IQSEEComCompatAppLoader.h"
+
+#define SMCI_QSEECOMCOMPATAPPLOADER_MAX_FILENAME_LEN UINT32_C(64)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ELFCLASS32 UINT32_C(1)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ELFCLASS64 UINT32_C(2)
+
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_INVALID_BUFFER INT32_C(10)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_METADATA_INVALID INT32_C(13)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_MAX_NUM_APPS INT32_C(14)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_NO_NAME_IN_METADATA INT32_C(15)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_ALREADY_LOADED INT32_C(16)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_BLACKLISTED INT32_C(22)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_NOT_LOADED INT32_C(23)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(24)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_FILENAME_TOO_LONG INT32_C(25)
+#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_ARCH_NOT_SUPPORTED INT32_C(26)
+
+#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOADFROMREGION 0
+#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOADFROMBUFFER 1
+#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOOKUPTA 2
+
+
+static inline int32_t
+smci_qseecomcompatapploader_release(struct smci_object self)
+{
+	return IQSEEComCompatAppLoader_release(self);
+}
+
+static inline int32_t
+smci_qseecomcompatapploader_retain(struct smci_object self)
+{
+	return IQSEEComCompatAppLoader_retain(self);
+}
+
+static inline int32_t
+smci_qseecomcompatapploader_loadfromregion(struct smci_object self,
+			struct smci_object app_elf_val, const void *filename_ptr,
+			size_t filename_len, struct smci_object *app_compat_ptr)
+{
+	return IQSEEComCompatAppLoader_loadFromRegion(self,
+			app_elf_val, filename_ptr,
+			filename_len, app_compat_ptr);
+}
+
+static inline int32_t
+smci_qseecomcompatapploader_loadfrombuffer(struct smci_object self,
+			const void *app_elf_ptr, size_t app_elf_len,
+			const void *filename_ptr, size_t filename_len,
+			void *dist_name_ptr, size_t dist_name_len,
+			size_t *dist_name_lenout, struct smci_object *app_compat_ptr)
+{
+	return IQSEEComCompatAppLoader_loadFromBuffer(self,
+			app_elf_ptr, app_elf_len,
+			filename_ptr, filename_len,
+			dist_name_ptr, dist_name_len,
+			dist_name_lenout, app_compat_ptr);
+}
+
+static inline int32_t
+smci_qseecomcompatapploader_lookupta(struct smci_object self, const void *app_name_ptr,
+			size_t app_name_len, struct smci_object *app_compat_ptr)
+{
+	return IQSEEComCompatAppLoader_lookupTA(self, app_name_ptr,
+			app_name_len, app_compat_ptr);
+}
+
+#endif /* __SMCI_QSEECOMCOMPATAPPLOADER_H */

+ 3296 - 0
qcom/opensource/securemsm-kernel/smcinvoke/smcinvoke.c

@@ -0,0 +1,3296 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/hashtable.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/delay.h>
+#include <linux/kref.h>
+#include <linux/signal.h>
+#include <linux/msm_ion.h>
+#include <linux/mem-buf.h>
+#include <linux/of_platform.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+#include <linux/firmware/qcom/qcom_scm.h>
+#else
+#include <linux/qcom_scm.h>
+#endif
+#include <linux/freezer.h>
+#include <linux/ratelimit.h>
+#include <asm/cacheflush.h>
+#include <linux/qtee_shmbridge.h>
+#include <linux/kthread.h>
+#include "smcinvoke.h"
+#include "smcinvoke_object.h"
+#include "IClientEnv.h"
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+#include <linux/qseecom_kernel.h>
+#include "misc/qseecom_priv.h"
+#else
+#include "misc/qseecom_kernel.h"
+#endif
+#include "misc/qseecomi.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace_smcinvoke.h"
+
+#define SMCINVOKE_DEV				"smcinvoke"
+#define SMCINVOKE_TZ_ROOT_OBJ			1
+#define SMCINVOKE_TZ_OBJ_NULL			0
+#define SMCINVOKE_ARGS_ALIGN_SIZE		(sizeof(uint64_t))
+#define SMCINVOKE_NEXT_AVAILABLE_TXN		0
+#define SMCINVOKE_REQ_PLACED			1
+#define SMCINVOKE_REQ_PROCESSING		2
+#define SMCINVOKE_REQ_PROCESSED			3
+#define SMCINVOKE_INCREMENT			1
+#define SMCINVOKE_DECREMENT			0
+#define SMCINVOKE_OBJ_TYPE_TZ_OBJ		0
+#define SMCINVOKE_OBJ_TYPE_SERVER		1
+#define SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL	2
+#define SMCINVOKE_MEM_MAP_OBJ			0
+#define SMCINVOKE_MEM_RGN_OBJ			1
+#define SMCINVOKE_MEM_PERM_RW			6
+#define SMCINVOKE_SCM_EBUSY_WAIT_MS		30
+#define SMCINVOKE_SCM_EBUSY_MAX_RETRY		200
+#define TZCB_ERR_RATELIMIT_INTERVAL		(1*HZ)
+#define TZCB_ERR_RATELIMIT_BURST		1
+
+/* Print TZ callback errors at most once per ratelimit interval */
+#define tzcb_err_ratelimited(fmt, ...) 	do {		\
+	static DEFINE_RATELIMIT_STATE(_rs, 		\
+			TZCB_ERR_RATELIMIT_INTERVAL,	\
+			TZCB_ERR_RATELIMIT_BURST);	\
+	if (__ratelimit(&_rs))				\
+		pr_err(fmt, ##__VA_ARGS__);		\
+} while(0)
+
+
+
+/* TZ defined values - Start */
+#define SMCINVOKE_INVOKE_PARAM_ID		0x224
+#define SMCINVOKE_CB_RSP_PARAM_ID		0x22
+#define SMCINVOKE_INVOKE_CMD_LEGACY		0x32000600
+#define SMCINVOKE_INVOKE_CMD			0x32000602
+#define SMCINVOKE_CB_RSP_CMD			0x32000601
+#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED	3
+/* TZ defined values - End */
+
+/* Asynchronous protocol values */
+/* Driver async version is set to match the minimal TZ version that supports async memory object */
+#define SMCINVOKE_ASYNC_VERSION          (0x00010002)
+#define SMCINVOKE_ASYNC_OP_MEMORY_OBJECT (0x00000003)
+
+/*
+ * This is the state when the server FD has been closed but
+ * TZ still holds refs to CBObjs served by this server.
+ */
+#define SMCINVOKE_SERVER_STATE_DEFUNCT		1
+
+#define CBOBJ_MAX_RETRIES 50
+#define FOR_ARGS(ndxvar, counts, section) \
+	for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
+		ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
+		+ OBJECT_COUNTS_NUM_##section(counts)); \
+		++ndxvar)
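+/*
+ * Example (illustrative): with counts = OBJECT_COUNTS_PACK(2, 1, 0, 0),
+ * FOR_ARGS(i, counts, BI) walks the two input-buffer slots (i = 0, 1) and
+ * FOR_ARGS(i, counts, BO) the single output-buffer slot (i = 2), since
+ * argument slots are laid out in BI, BO, OI, OO order.
+ */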
+
+#define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \
+			sizeof(struct smcinvoke_msg_hdr) + \
+			sizeof(union smcinvoke_tz_args) * \
+			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts))
+
+/*
+ * +ve uhandle : either remote obj or mem obj, decided by f_ops
+ * -ve uhandle : either Obj NULL or CBObj
+ *	- -1: OBJ NULL
+ *	- < -1: CBObj
+ */
+#define UHANDLE_IS_FD(h) ((h) >= 0)
+#define UHANDLE_IS_NULL(h) ((h) == SMCINVOKE_USERSPACE_OBJ_NULL)
+#define UHANDLE_IS_CB_OBJ(h) (h < SMCINVOKE_USERSPACE_OBJ_NULL)
+#define UHANDLE_NULL (SMCINVOKE_USERSPACE_OBJ_NULL)
+/*
+ * MAKE => create handle for other domain i.e. TZ or userspace
+ * GET => retrieve obj from incoming handle
+ */
+#define UHANDLE_GET_CB_OBJ(h) (-2-(h))
+#define UHANDLE_MAKE_CB_OBJ(o) (-2-(o))
+#define UHANDLE_GET_FD(h) (h)
+
+/*
+ * +ve tzhandle : remote object i.e. owned by TZ
+ * -ve tzhandle : local object i.e. owned by linux
+ * --------------------------------------------------
+ *| 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) |
+ * ---------------------------------------------------
+ * Server ids are defined below for various local objects
+ * server id 0 : Kernel Obj
+ * server id 1 : Memory region Obj
+ * server id 2 : Memory map Obj
+ * server id 3-15: Reserved
+ * server id 16 & up: Callback Objs
+ */
+#define KRNL_SRVR_ID 0
+#define MEM_RGN_SRVR_ID 1
+#define MEM_MAP_SRVR_ID 2
+#define CBOBJ_SERVER_ID_START 0x10
+#define CBOBJ_SERVER_ID_END ((1<<16) - 1)
+/* local obj id is represented by 15 bits */
+#define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
+/* CBOBJs will be served by server id 0x10 onwards */
+#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
+#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
+#define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | s)
+#define SET_BIT(s, b) ((s) | (1 << (b)))
+#define UNSET_BIT(s, b) ((s) & (~(1 << (b))))
+
+#define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL)
+#define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000)
+#define TZHANDLE_IS_REMOTE(h) (!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h))
+
+#define TZHANDLE_IS_KERNEL_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
+				TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID)
+#define TZHANDLE_IS_MEM_RGN_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
+				TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID)
+#define TZHANDLE_IS_MEM_MAP_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
+				TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID)
+#define TZHANDLE_IS_MEM_OBJ(h) (TZHANDLE_IS_MEM_RGN_OBJ(h) || \
+				TZHANDLE_IS_MEM_MAP_OBJ(h))
+#define TZHANDLE_IS_CB_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \
+				TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START)
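+/*
+ * Worked example (illustrative): TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, 3)
+ * yields 0x80030001; TZHANDLE_IS_LOCAL() is true for it,
+ * TZHANDLE_GET_SERVER() == MEM_RGN_SRVR_ID, TZHANDLE_GET_OBJID() == 3,
+ * and hence TZHANDLE_IS_MEM_RGN_OBJ() holds.
+ */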
+
+#define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops)
+
+static DEFINE_MUTEX(g_smcinvoke_lock);
+#define NO_LOCK 0
+#define TAKE_LOCK 1
+#define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); }
+#define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); }
+
+#define POST_KT_SLEEP           0
+#define POST_KT_WAKEUP          1
+#define MAX_CHAR_NAME           50
+
+enum worker_thread_type {
+	SHMB_WORKER_THREAD      = 0,
+	OBJECT_WORKER_THREAD,
+	ADCI_WORKER_THREAD,
+	MAX_THREAD_NUMBER
+};
+
+static DEFINE_HASHTABLE(g_cb_servers, 8);
+static LIST_HEAD(g_mem_objs);
+static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START;
+static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id;
+static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
+static unsigned int cb_reqs_inflight;
+static bool legacy_smc_call;
+static int invoke_cmd;
+
+static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long);
+static int smcinvoke_open(struct inode *, struct file *);
+static int smcinvoke_release(struct inode *, struct file *);
+static int release_cb_server(uint16_t);
+
+static const struct file_operations g_smcinvoke_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= smcinvoke_ioctl,
+	.compat_ioctl	= smcinvoke_ioctl,
+	.open		= smcinvoke_open,
+	.release	= smcinvoke_release,
+};
+
+static dev_t smcinvoke_device_no;
+static struct cdev smcinvoke_cdev;
+static struct class *driver_class;
+struct device *class_dev;
+static struct platform_device *smcinvoke_pdev;
+
+/* We disable async memory object support by default,
+ * until we receive the first message from TZ over the
+ * async channel and can determine TZ async version.
+ */
+static bool mem_obj_async_support = false;
+static uint32_t tz_async_version = 0x0;
+
+struct smcinvoke_buf_hdr {
+	uint32_t offset;
+	uint32_t size;
+};
+
+union smcinvoke_tz_args {
+	struct smcinvoke_buf_hdr b;
+	int32_t handle;
+};
+
+struct smcinvoke_msg_hdr {
+	uint32_t tzhandle;
+	uint32_t op;
+	uint32_t counts;
+};
+
+/* Inbound reqs from TZ */
+struct smcinvoke_tzcb_req {
+	int32_t result;
+	struct smcinvoke_msg_hdr hdr;
+	union smcinvoke_tz_args args[];
+};
+
+struct smcinvoke_file_data {
+	uint32_t context_type;
+	union {
+		uint32_t tzhandle;
+		uint16_t server_id;
+	};
+};
+
+struct smcinvoke_piggyback_msg {
+	uint32_t version;
+	uint32_t op;
+	uint32_t counts;
+	int32_t objs[];
+};
+
+/* Mapped memory object data
+ *
+ * memObjRef		Handle reference for the memory object
+ * mapObjRef		Handle reference for the map object
+ * addr				Mapped memory address
+ * size				Size of mapped memory
+ * perm				Access rights for the memory
+ */
+struct smcinvoke_mem_obj_info {
+	uint32_t memObjRef;
+	uint32_t mapObjRef;
+	uint64_t addr;
+	uint64_t size;
+	uint32_t perm;
+};
+
+/* Memory object info to be written into the async buffer
+ *
+ * version		Async protocol version
+ * op			Async protocol operation
+ * count		Number of memory objects passed
+ * mo			Mapped memory object data
+ */
+struct smcinvoke_mem_obj_msg {
+	uint32_t version;
+	uint32_t op;
+	uint32_t count;
+	struct smcinvoke_mem_obj_info mo[];
+};
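+
+/*
+ * Illustrative sketch (not driver code): a single-entry async message
+ * announcing one mapped memory object would be populated roughly as
+ * follows; "mem_rgn_tzhandle"/"mem_map_tzhandle" are hypothetical names
+ * for the corresponding TZ handles.
+ *
+ *	msg->version = SMCINVOKE_ASYNC_VERSION;
+ *	msg->op      = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
+ *	msg->count   = 1;
+ *	msg->mo[0].memObjRef = mem_rgn_tzhandle;
+ *	msg->mo[0].mapObjRef = mem_map_tzhandle;
+ *	msg->mo[0].addr      = mem_obj->p_addr;
+ *	msg->mo[0].size      = mem_obj->p_addr_len;
+ *	msg->mo[0].perm      = SMCINVOKE_MEM_PERM_RW;
+ */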
+
+struct smcinvoke_mem_obj_pending_async {
+	struct smcinvoke_mem_obj *mem_obj;
+	struct list_head list;
+};
+
+/* Data structure to hold request coming from TZ */
+struct smcinvoke_cb_txn {
+	uint32_t txn_id;
+	int32_t state;
+	struct smcinvoke_tzcb_req *cb_req;
+	size_t cb_req_bytes;
+	struct file **filp_to_release;
+	struct hlist_node hash;
+	struct kref ref_cnt;
+};
+
+struct smcinvoke_server_info {
+	uint16_t server_id;
+	uint16_t state;
+	uint32_t txn_id;
+	struct kref ref_cnt;
+	wait_queue_head_t req_wait_q;
+	wait_queue_head_t rsp_wait_q;
+	size_t cb_buf_size;
+	DECLARE_HASHTABLE(reqs_table, 4);
+	DECLARE_HASHTABLE(responses_table, 4);
+	struct hlist_node hash;
+	struct list_head pending_cbobjs;
+	uint8_t is_server_suspended;
+};
+
+struct smcinvoke_cbobj {
+	uint16_t cbobj_id;
+	struct kref ref_cnt;
+	struct smcinvoke_server_info *server;
+	struct list_head list;
+};
+
+/*
+ * We require a couple of objects: one for the mem region and another
+ * for the mapped mem_obj once the mem region has been mapped. It is
+ * possible that TZ releases either one independently of the other.
+ */
+struct smcinvoke_mem_obj {
+	/* these ids are objid part of tzhandle */
+	uint16_t mem_region_id;
+	uint16_t mem_map_obj_id;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *buf_attach;
+	struct sg_table *sgt;
+	struct kref mem_regn_ref_cnt;
+	struct kref mem_map_obj_ref_cnt;
+	uint64_t p_addr;
+	size_t p_addr_len;
+	struct list_head list;
+	uint64_t shmbridge_handle;
+	struct smcinvoke_server_info *server;
+	int32_t mem_obj_user_fd;
+};
+
+static LIST_HEAD(g_bridge_postprocess);
+DEFINE_MUTEX(bridge_postprocess_lock);
+
+static LIST_HEAD(g_object_postprocess);
+DEFINE_MUTEX(object_postprocess_lock);
+
+struct bridge_deregister {
+	uint64_t shmbridge_handle;
+	struct dma_buf *dmabuf_to_free;
+};
+
+struct object_release {
+	uint32_t tzhandle;
+	uint32_t context_type;
+};
+
+
+struct smcinvoke_shmbridge_deregister_pending_list {
+	struct list_head list;
+	struct bridge_deregister data;
+};
+
+struct smcinvoke_object_release_pending_list {
+	struct list_head list;
+	struct object_release data;
+};
+
+struct smcinvoke_worker_thread {
+	enum worker_thread_type type;
+	atomic_t postprocess_kthread_state;
+	wait_queue_head_t postprocess_kthread_wq;
+	struct task_struct *postprocess_kthread_task;
+};
+
+static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER];
+static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = {
+	"smcinvoke_shmbridge_postprocess", "smcinvoke_object_postprocess", "smcinvoke_adci_thread"};
+static struct Object adci_rootEnv = Object_NULL;
+extern int get_root_obj(struct Object *rootObj);
+
+static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
+		size_t in_buf_len,
+		uint8_t *out_buf, phys_addr_t out_paddr,
+		size_t out_buf_len,
+		struct smcinvoke_cmd_req *req,
+		union smcinvoke_arg *args_buf,
+		bool *tz_acked, uint32_t context_type,
+		struct qtee_shm *in_shm, struct qtee_shm *out_shm, bool retry);
+
+static void process_piggyback_data(void *buf, size_t buf_size);
+static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size, struct list_head *l_pending_mem_obj);
+static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj);
+
+static void destroy_cb_server(struct kref *kref)
+{
+	struct smcinvoke_server_info *server = container_of(kref,
+			struct smcinvoke_server_info, ref_cnt);
+	if (server) {
+		hash_del(&server->hash);
+		kfree(server);
+	}
+}
+
+/*
+ * A separate find function is required mainly for a couple of cases:
+ * next_cb_server_id_locked, which checks whether a server id has already been used.
+ *     - Taking a ref_cnt would be needless overhead for this case.
+ * smcinvoke_release, which is called when the server is closed from userspace.
+ *     - During server creation we init the ref count; here we put it back.
+ */
+static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
+{
+	struct smcinvoke_server_info *data = NULL;
+
+	hash_for_each_possible(g_cb_servers, data, hash, server_id) {
+		if (data->server_id == server_id)
+			return data;
+	}
+	return NULL;
+}
+
+static struct smcinvoke_server_info *get_cb_server_locked(uint16_t server_id)
+{
+	struct smcinvoke_server_info *server = find_cb_server_locked(server_id);
+
+	if (server)
+		kref_get(&server->ref_cnt);
+
+	return server;
+}
+
+static uint16_t next_cb_server_id_locked(void)
+{
+	if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
+		g_last_cb_server_id = CBOBJ_SERVER_ID_START;
+
+	while (find_cb_server_locked(++g_last_cb_server_id))
+		;
+
+	return g_last_cb_server_id;
+}
+
+static inline void release_filp(struct file **filp_to_release, size_t arr_len)
+{
+	size_t i = 0;
+
+	for (i = 0; i < arr_len; i++) {
+		if (filp_to_release[i]) {
+			fput(filp_to_release[i]);
+			filp_to_release[i] = NULL;
+		}
+	}
+}
+
+static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
+							bool is_mem_rgn_obj)
+{
+	struct smcinvoke_mem_obj *mem_obj = NULL;
+
+	if (list_empty(&g_mem_objs))
+		return NULL;
+
+	list_for_each_entry(mem_obj, &g_mem_objs, list) {
+		if ((is_mem_rgn_obj &&
+				(mem_obj->mem_region_id == mem_obj_id)) ||
+				(!is_mem_rgn_obj &&
+				(mem_obj->mem_map_obj_id == mem_obj_id)))
+				return mem_obj;
+	}
+	return NULL;
+}
+
+static uint32_t next_mem_region_obj_id_locked(void)
+{
+	if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
+		g_last_mem_rgn_id = 0;
+
+	while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
+		;
+
+	return g_last_mem_rgn_id;
+}
+
+static uint32_t next_mem_map_obj_id_locked(void)
+{
+	if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
+		g_last_mem_map_obj_id = 0;
+
+	while (find_mem_obj_locked(++g_last_mem_map_obj_id,
+			SMCINVOKE_MEM_MAP_OBJ))
+		;
+
+	return g_last_mem_map_obj_id;
+}
+
+static void smcinvoke_shmbridge_post_process(void)
+{
+	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
+	uint64_t handle = 0;
+	struct dma_buf *dmabuf_to_free = NULL;
+
+	do {
+		mutex_lock(&bridge_postprocess_lock);
+		if (list_empty(&g_bridge_postprocess)) {
+			mutex_unlock(&bridge_postprocess_lock);
+			break;
+		}
+		pos = g_bridge_postprocess.next;
+		entry = list_entry(pos,
+				struct smcinvoke_shmbridge_deregister_pending_list,
+				list);
+		if (entry) {
+			handle = entry->data.shmbridge_handle;
+			dmabuf_to_free = entry->data.dmabuf_to_free;
+		} else {
+			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
+		}
+		list_del(pos);
+		kfree_sensitive(entry);
+		mutex_unlock(&bridge_postprocess_lock);
+
+		if (entry) {
+			do {
+				ret = qtee_shmbridge_deregister(handle);
+				if (unlikely(ret)) {
+					pr_err_ratelimited("SHM failed: ret:%d ptr:0x%p h:%#llx\n",
+							ret,
+							dmabuf_to_free,
+							handle);
+				} else {
+					pr_debug("SHM deletion: Handle:%#llx\n",
+							handle);
+					dma_buf_put(dmabuf_to_free);
+				}
+			} while (-EBUSY == ret);
+		}
+	} while (1);
+}
+
+static int smcinvoke_release_tz_object(struct qtee_shm *in_shm, struct qtee_shm *out_shm,
+		uint32_t tzhandle, uint32_t context_type)
+{
+	int ret = 0;
+	bool release_handles;
+	uint8_t *in_buf = NULL;
+	uint8_t *out_buf = NULL;
+	struct smcinvoke_msg_hdr hdr = {0};
+	struct smcinvoke_cmd_req req = {0};
+
+	in_buf = in_shm->vaddr;
+	out_buf = out_shm->vaddr;
+	hdr.tzhandle = tzhandle;
+	hdr.op = OBJECT_OP_RELEASE;
+	hdr.counts = 0;
+	*(struct smcinvoke_msg_hdr *)in_buf = hdr;
+
+	ret = prepare_send_scm_msg(in_buf, in_shm->paddr,
+			SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm->paddr,
+			SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL,
+			&release_handles, context_type, in_shm, out_shm, false);
+	process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE);
+	if (ret) {
+		pr_err_ratelimited("Failed to release object(0x%x), ret:%d\n",
+				hdr.tzhandle, ret);
+	} else {
+		pr_debug("Released object(0x%x) successfully.\n",
+				hdr.tzhandle);
+	}
+
+	return ret;
+}
+
+
+static int smcinvoke_object_post_process(void)
+{
+	struct smcinvoke_object_release_pending_list *entry = NULL;
+	struct list_head *pos;
+	int ret = 0;
+	struct qtee_shm in_shm = {0}, out_shm = {0};
+
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
+	if (ret) {
+		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for in msg in object release\n");
+		goto out;
+	}
+
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
+	if (ret) {
+		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for out msg in object release\n");
+		goto out;
+	}
+
+	do {
+		mutex_lock(&object_postprocess_lock);
+		if (list_empty(&g_object_postprocess)) {
+			mutex_unlock(&object_postprocess_lock);
+			break;
+		}
+		pos = g_object_postprocess.next;
+		entry = list_entry(pos, struct smcinvoke_object_release_pending_list, list);
+
+		list_del(pos);
+		mutex_unlock(&object_postprocess_lock);
+
+		if (entry) {
+			do {
+				ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+					       	entry->data.tzhandle,  entry->data.context_type);
+			} while (-EBUSY == ret);
+		} else {
+			pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos);
+		}
+		kfree_sensitive(entry);
+	} while (1);
+
+out:
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
+
+	return ret;
+}
+
+static void smcinvoke_start_adci_thread(void)
+{
+	int32_t ret = OBJECT_ERROR;
+	int retry_count = 0;
+
+	ret = get_root_obj(&adci_rootEnv);
+	if (ret) {
+		pr_err("failed to get rootEnv for ADCI invoke thread. ret = %d\n", ret);
+		/* Mark it Object_NULL in the failure case to avoid undefined
+		 * behavior while releasing a garbage adci_rootEnv object.
+		 */
+		adci_rootEnv = Object_NULL;
+		goto out;
+	}
+	/* Invoke call to QTEE which should never return if ADCI is supported */
+	pr_debug("Invoking adciAccept method in QTEE\n");
+	do {
+		ret = IClientEnv_adciAccept(adci_rootEnv);
+		if (ret == OBJECT_ERROR_BUSY) {
+			pr_err_ratelimited("Secure side is busy, will retry after 5 ms, retry_count = %d\n", retry_count);
+			msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
+		}
+	} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
+
+	if (ret == OBJECT_ERROR_INVALID)
+		pr_err("ADCI feature is not supported on this chipsets, ret = %d\n", ret);
+	else
+		pr_debug("Received response from QTEE, ret = %d\n", ret);
+out:
+	/* Control should reach this point only if the ADCI feature is not supported
+	 * by QTEE or the ADCI thread held in QTEE has been released.
+	 */
+	Object_ASSIGN_NULL(adci_rootEnv);
+}
+
+static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke)
+{
+	if (smcinvoke) {
+		atomic_set(&smcinvoke->postprocess_kthread_state,
+				POST_KT_WAKEUP);
+		wake_up_interruptible(&smcinvoke->postprocess_kthread_wq);
+	} else {
+		pr_err("Invalid smcinvoke pointer.\n");
+	}
+}
+
+
+static int smcinvoke_postprocess_kthread_func(void *data)
+{
+	struct smcinvoke_worker_thread *smcinvoke_wrk_trd = data;
+	static const char *const tag[] = {"shmbridge", "object", "adci", "invalid"};
+
+	if (!smcinvoke_wrk_trd) {
+		pr_err("Bad input.\n");
+		return -EINVAL;
+	}
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(
+			smcinvoke_wrk_trd->postprocess_kthread_wq,
+			kthread_should_stop() ||
+			(atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state)
+			== POST_KT_WAKEUP));
+		switch (smcinvoke_wrk_trd->type) {
+		case SHMB_WORKER_THREAD:
+			pr_debug("kthread to %s postprocess is called %d\n",
+			tag[SHMB_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			smcinvoke_shmbridge_post_process();
+			break;
+		case OBJECT_WORKER_THREAD:
+			pr_debug("kthread to %s postprocess is called %d\n",
+			tag[OBJECT_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			smcinvoke_object_post_process();
+			break;
+		case ADCI_WORKER_THREAD:
+			pr_debug("kthread to %s postprocess is called %d\n",
+			tag[ADCI_WORKER_THREAD], atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state));
+			smcinvoke_start_adci_thread();
+			break;
+		default:
+			pr_err("Invalid thread type(%d), do nothing.\n",
+					(int)smcinvoke_wrk_trd->type);
+			break;
+		}
+		/* For ADCI thread, if control reaches here, that indicates either ADCI
+		 * thread is not supported (or) released by QTEE. Since ADCI thread is
+		 * getting signaled only during the smcinvoke driver initialization,
+		 * there is no point of putting the thread into sleep state again. All the
+		 * required post-processing will be taken care by object and shmbridge threads.
+		 */
+		if (smcinvoke_wrk_trd->type == ADCI_WORKER_THREAD) {
+			break;
+		}
+		atomic_set(&smcinvoke_wrk_trd->postprocess_kthread_state,
+			POST_KT_SLEEP);
+	}
+	pr_warn("kthread(worker_thread) processed, worker_thread type is %d\n", smcinvoke_wrk_trd->type);
+
+	return 0;
+}
+
+
+static int smcinvoke_create_kthreads(void)
+{
+	int i, rc = 0;
+	const enum worker_thread_type thread_type[MAX_THREAD_NUMBER] = {
+		SHMB_WORKER_THREAD, OBJECT_WORKER_THREAD, ADCI_WORKER_THREAD};
+
+	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
+		init_waitqueue_head(&smcinvoke[i].postprocess_kthread_wq);
+		smcinvoke[i].type = thread_type[i];
+		smcinvoke[i].postprocess_kthread_task = kthread_run(
+				smcinvoke_postprocess_kthread_func,
+				&smcinvoke[i], thread_name[i]);
+		if (IS_ERR(smcinvoke[i].postprocess_kthread_task)) {
+			rc = PTR_ERR(smcinvoke[i].postprocess_kthread_task);
+			pr_err("fail to create kthread to postprocess, rc = %x\n",
+					rc);
+			return rc;
+		}
+		atomic_set(&smcinvoke[i].postprocess_kthread_state,
+				POST_KT_SLEEP);
+	}
+
+	return rc;
+}
+
+static void smcinvoke_destroy_kthreads(void)
+{
+	int i;
+	int32_t  ret = OBJECT_ERROR;
+	int retry_count = 0;
+
+	if (!Object_isNull(adci_rootEnv)) {
+		pr_debug("Invoking adciShutdown method in QTEE\n");
+		do {
+			ret = IClientEnv_adciShutdown(adci_rootEnv);
+			if (ret == OBJECT_ERROR_BUSY) {
+				pr_err_ratelimited("Secure side is busy, will retry after 5 ms, retry_count = %d\n", retry_count);
+				msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
+			}
+		} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
+		if (OBJECT_isERROR(ret)) {
+			pr_err("adciShutdown in QTEE failed with error = %d\n", ret);
+		}
+		Object_ASSIGN_NULL(adci_rootEnv);
+	}
+
+	for (i = 0; i < MAX_THREAD_NUMBER; i++) {
+		kthread_stop(smcinvoke[i].postprocess_kthread_task);
+	}
+}
+
+/* Queue newly created memory object to l_pending_mem_obj list.
+ * Later, the mapping information for objects in this list will be sent to TZ
+ * over the async side channel.
+ *
+ * No return value as TZ is always able to explicitly ask for this information
+ * in case this function fails and the memory object is not added to this list.
+ */
+static void queue_mem_obj_pending_async_locked(struct smcinvoke_mem_obj *mem_obj, struct list_head *l_pending_mem_obj)
+{
+	struct smcinvoke_mem_obj_pending_async *t_mem_obj_pending =
+			kzalloc(sizeof(*t_mem_obj_pending), GFP_KERNEL);
+
+	/*
+	 * We are not failing execution in case of a failure here,
+	 * since TZ can always ask for this information explicitly
+	 * if it's not available in the side channel.
+	 */
+	if (!t_mem_obj_pending) {
+		pr_err("Unable to allocate memory\n");
+		return;
+	}
+
+	t_mem_obj_pending->mem_obj = mem_obj;
+	list_add(&t_mem_obj_pending->list, l_pending_mem_obj);
+}
+
+static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj)
+{
+	int ret = 0;
+	struct dma_buf *dmabuf_to_free = mem_obj->dma_buf;
+	uint64_t shmbridge_handle = mem_obj->shmbridge_handle;
+	struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL;
+
+	list_del(&mem_obj->list);
+	kfree(mem_obj->server);
+	kfree(mem_obj);
+	mem_obj = NULL;
+	mutex_unlock(&g_smcinvoke_lock);
+
+	if (shmbridge_handle)
+		ret = qtee_shmbridge_deregister(shmbridge_handle);
+	if (ret) {
+		pr_err("Error:%d delete bridge failed leaking memory 0x%p\n",
+				ret, dmabuf_to_free);
+		if (ret == -EBUSY) {
+			pr_err("EBUSY: we postpone it 0x%p\n",
+					dmabuf_to_free);
+			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+			if (entry) {
+				entry->data.shmbridge_handle = shmbridge_handle;
+				entry->data.dmabuf_to_free = dmabuf_to_free;
+				mutex_lock(&bridge_postprocess_lock);
+				list_add_tail(&entry->list, &g_bridge_postprocess);
+				mutex_unlock(&bridge_postprocess_lock);
+				pr_debug("SHMBridge list: added a Handle:%#llx\n",
+						shmbridge_handle);
+				__wakeup_postprocess_kthread(
+						&smcinvoke[SHMB_WORKER_THREAD]);
+			}
+		}
+	} else {
+		dma_buf_put(dmabuf_to_free);
+	}
+
+	mutex_lock(&g_smcinvoke_lock);
+}
+
+static void del_mem_regn_obj_locked(struct kref *kref)
+{
+	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
+			struct smcinvoke_mem_obj, mem_regn_ref_cnt);
+
+	/*
+	 * mem_regn obj and mem_map obj are held into mem_obj structure which
+	 * can't be released until both kinds of objs have been released.
+	 * So check whether mem_map iobj has ref 0 and only then release mem_obj
+	 */
+	if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0)
+		free_mem_obj_locked(mem_obj);
+}
+
+static void del_mem_map_obj_locked(struct kref *kref)
+{
+	struct smcinvoke_mem_obj *mem_obj = container_of(kref,
+			struct smcinvoke_mem_obj, mem_map_obj_ref_cnt);
+
+	mem_obj->p_addr_len = 0;
+	mem_obj->p_addr = 0;
+	if (mem_obj->sgt)
+		dma_buf_unmap_attachment(mem_obj->buf_attach,
+				mem_obj->sgt, DMA_BIDIRECTIONAL);
+	if (mem_obj->buf_attach)
+		dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach);
+
+	/*
+	 * mem_regn obj and mem_map obj are held into mem_obj structure which
+	 * can't be released until both kinds of objs have been released.
+	 * So check if mem_regn obj has ref 0 and only then release mem_obj
+	 */
+	if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0)
+		free_mem_obj_locked(mem_obj);
+}
+
+static int release_mem_obj_locked(int32_t tzhandle)
+{
+	int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle);
+	struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked(
+			TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj);
+
+	if (!mem_obj) {
+		pr_err("memory object not found\n");
+		return OBJECT_ERROR_BADOBJ;
+	}
+
+	if (is_mem_regn_obj)
+		kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked);
+	else
+		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
+	return OBJECT_OK;
+}
+
+static void free_pending_cbobj_locked(struct kref *kref)
+{
+	struct smcinvoke_server_info *server = NULL;
+	struct smcinvoke_cbobj *obj = container_of(kref,
+			struct smcinvoke_cbobj, ref_cnt);
+	list_del(&obj->list);
+	server = obj->server;
+	kfree(obj);
+	if (server)
+		kref_put(&server->ref_cnt, destroy_cb_server);
+}
+
+static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
+{
+	int ret = 0;
+	bool release_server = true;
+	struct list_head *head = NULL;
+	struct smcinvoke_cbobj *cbobj = NULL;
+	struct smcinvoke_cbobj *obj = NULL;
+	struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id);
+
+	if (!server) {
+		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
+		return OBJECT_ERROR_BADOBJ;
+	}
+
+	head = &server->pending_cbobjs;
+	list_for_each_entry(cbobj, head, list)
+		if (cbobj->cbobj_id == obj_id) {
+			kref_get(&cbobj->ref_cnt);
+			goto out;
+		}
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj) {
+		ret = OBJECT_ERROR_KMEM;
+		goto out;
+	}
+
+	obj->cbobj_id = obj_id;
+	kref_init(&obj->ref_cnt);
+	obj->server = server;
+	/*
+	 * we are holding server ref in cbobj; we will
+	 * release server ref when cbobj is destroyed
+	 */
+	release_server = false;
+	list_add_tail(&obj->list, head);
+out:
+	if (release_server)
+		kref_put(&server->ref_cnt, destroy_cb_server);
+	return ret;
+}
+
+static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
+{
+	int ret = -EINVAL;
+	struct smcinvoke_server_info *srvr_info =
+			get_cb_server_locked(srvr_id);
+	struct list_head *head = NULL;
+	struct smcinvoke_cbobj *cbobj = NULL;
+
+	if (!srvr_info) {
+		pr_err("%s, server id : %u not found\n", __func__, srvr_id);
+		return ret;
+	}
+
+	trace_put_pending_cbobj_locked(srvr_id, obj_id);
+
+	head = &srvr_info->pending_cbobjs;
+	list_for_each_entry(cbobj, head, list)
+		if (cbobj->cbobj_id == obj_id) {
+			kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked);
+			ret = 0;
+			break;
+		}
+	kref_put(&srvr_info->ref_cnt, destroy_cb_server);
+	return ret;
+}
+
+static int release_tzhandle_locked(int32_t tzhandle)
+{
+	if (TZHANDLE_IS_MEM_OBJ(tzhandle))
+		return release_mem_obj_locked(tzhandle);
+	else if (TZHANDLE_IS_CB_OBJ(tzhandle))
+		return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
+				TZHANDLE_GET_OBJID(tzhandle));
+	return OBJECT_ERROR;
+}
+
+static void release_tzhandles(const int32_t *tzhandles, size_t len)
+{
+	size_t i;
+
+	mutex_lock(&g_smcinvoke_lock);
+	for (i = 0; i < len; i++)
+		release_tzhandle_locked(tzhandles[i]);
+	mutex_unlock(&g_smcinvoke_lock);
+}
+
+static void delete_cb_txn_locked(struct kref *kref)
+{
+	struct smcinvoke_cb_txn *cb_txn = container_of(kref,
+			struct smcinvoke_cb_txn, ref_cnt);
+
+	if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
+		release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);
+
+	kfree(cb_txn->cb_req);
+	hash_del(&cb_txn->hash);
+	kfree(cb_txn);
+}
+
+static struct smcinvoke_cb_txn *find_cbtxn_locked(
+		struct smcinvoke_server_info *server,
+		uint32_t txn_id, int32_t state)
+{
+	int i = 0;
+	struct smcinvoke_cb_txn *cb_txn = NULL;
+
+	/*
+	 * Since HASH_BITS() does not work on pointers, we can't select hash
+	 * table using state and loop over it.
+	 */
+	if (state == SMCINVOKE_REQ_PLACED) {
+		/* pick up 1st req */
+		hash_for_each(server->reqs_table, i, cb_txn, hash) {
+			kref_get(&cb_txn->ref_cnt);
+			hash_del(&cb_txn->hash);
+			return cb_txn;
+		}
+	} else if (state == SMCINVOKE_REQ_PROCESSING) {
+		hash_for_each_possible(
+				server->responses_table, cb_txn, hash, txn_id) {
+			if (cb_txn->txn_id == txn_id) {
+				kref_get(&cb_txn->ref_cnt);
+				hash_del(&cb_txn->hash);
+				return cb_txn;
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+ * size_add_ saturates at SIZE_MAX. If integer overflow is detected,
+ * this function returns SIZE_MAX; otherwise the normal a + b is returned.
+ */
+static inline size_t size_add_(size_t a, size_t b)
+{
+	return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b;
+}
+/*
+ * pad_size is used along with size_align to define a buffer overflow
+ * protected version of ALIGN
+ */
+static inline size_t pad_size(size_t a, size_t b)
+{
+	return (~a + 1) % b;
+}
+
+/*
+ * size_align saturates at SIZE_MAX. If integer overflow is detected, this
+ * function returns SIZE_MAX; otherwise the next aligned size is returned.
+ */
+static inline size_t size_align(size_t a, size_t b)
+{
+	return size_add_(a, pad_size(a, b));
+}
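+/*
+ * Worked example (illustrative): size_align(13, SMCINVOKE_ARGS_ALIGN_SIZE)
+ * == 16, while size_align(SIZE_MAX - 3, 8) saturates to SIZE_MAX instead
+ * of wrapping around.
+ */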
+
+static uint16_t get_server_id(int cb_server_fd)
+{
+	uint16_t server_id = 0;
+	struct smcinvoke_file_data *svr_cxt = NULL;
+	struct file *tmp_filp = fget(cb_server_fd);
+
+	if (!tmp_filp || !FILE_IS_REMOTE_OBJ(tmp_filp))
+		return server_id;
+
+	svr_cxt = tmp_filp->private_data;
+	if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER)
+		server_id = svr_cxt->server_id;
+	fput(tmp_filp);
+
+	return server_id;
+}
+
+static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf)
+{
+	*dma_buf = dma_buf_get(uhandle);
+	return IS_ERR_OR_NULL(*dma_buf) ? false : true;
+}
+
+static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj,
+		struct file **filp)
+{
+	bool ret = false;
+	struct file *tmp_filp = fget(uhandle);
+
+	if (!tmp_filp)
+		return ret;
+
+	if (FILE_IS_REMOTE_OBJ(tmp_filp)) {
+		*tzobj = tmp_filp->private_data;
+		if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
+			*filp = tmp_filp;
+			tmp_filp = NULL;
+			ret = true;
+		}
+	}
+
+	if (tmp_filp)
+		fput(tmp_filp);
+	return ret;
+}
+
+static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj)
+{
+	int ret = 0;
+	int tz_perm = PERM_READ|PERM_WRITE;
+	uint32_t *vmid_list;
+	uint32_t *perms_list;
+	uint32_t nelems = 0;
+	struct dma_buf *dmabuf = mem_obj->dma_buf;
+	phys_addr_t phys = mem_obj->p_addr;
+	size_t size = mem_obj->p_addr_len;
+
+	if (!qtee_shmbridge_is_enabled())
+		return 0;
+
+	ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
+			(int **)&perms_list, (int *)&nelems);
+	if (ret) {
+		pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
+		return ret;
+	}
+
+	if (mem_buf_dma_buf_exclusive_owner(dmabuf))
+		perms_list[0] = PERM_READ | PERM_WRITE;
+
+	ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
+			tz_perm, &mem_obj->shmbridge_handle);
+
+	if (ret) {
+		pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n",
+				mem_obj->mem_region_id, ret);
+		goto exit;
+	}
+
+	trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id);
+exit:
+	kfree(perms_list);
+	kfree(vmid_list);
+	return ret;
+}
+
+/* Map memory region for a given memory object.
+ * Mapping information will be saved as part of the memory object structure.
+ */
+static int32_t smcinvoke_map_mem_region_locked(struct smcinvoke_mem_obj* mem_obj)
+{
+	int ret = OBJECT_OK;
+	struct dma_buf_attachment *buf_attach = NULL;
+	struct sg_table *sgt = NULL;
+
+	if (!mem_obj) {
+		pr_err("Invalid memory object\n");
+		return OBJECT_ERROR_BADOBJ;
+	}
+
+	if (!mem_obj->p_addr) {
+		kref_init(&mem_obj->mem_map_obj_ref_cnt);
+		buf_attach = dma_buf_attach(mem_obj->dma_buf,
+				&smcinvoke_pdev->dev);
+		if (IS_ERR(buf_attach)) {
+			ret = OBJECT_ERROR_KMEM;
+			pr_err("dma buf attach failed, ret: %d\n", ret);
+			goto out;
+		}
+		mem_obj->buf_attach = buf_attach;
+
+		sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR(sgt)) {
+			pr_err("mapping dma buffers failed, ret: %ld\n",
+					PTR_ERR(sgt));
+			ret = OBJECT_ERROR_KMEM;
+			goto out;
+		}
+		mem_obj->sgt = sgt;
+
+		/* contiguous only => nents=1 */
+		if (sgt->nents != 1) {
+			ret = OBJECT_ERROR_INVALID;
+			pr_err("sg entries are not contiguous, ret: %d\n", ret);
+			goto out;
+		}
+		mem_obj->p_addr = sg_dma_address(sgt->sgl);
+		mem_obj->p_addr_len = sgt->sgl->length;
+		if (!mem_obj->p_addr) {
+			ret = OBJECT_ERROR_INVALID;
+			pr_err("invalid physical address, ret: %d\n", ret);
+			goto out;
+		}
+
+		/* Increase reference count as we are feeding the memobj to
+		 * smcinvoke and unlock the mutex. No need to hold the mutex in
+		 * case of shmbridge creation.
+		 */
+		kref_get(&mem_obj->mem_map_obj_ref_cnt);
+		mutex_unlock(&g_smcinvoke_lock);
+
+		ret = smcinvoke_create_bridge(mem_obj);
+
+		/* Take lock again and decrease the reference count which we
+		 * increased for shmbridge but before proceeding further we
+		 * have to check again if the memobj is still valid or not
+		 * after decreasing the reference.
+		 */
+		mutex_lock(&g_smcinvoke_lock);
+		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
+
+		if (ret) {
+			ret = OBJECT_ERROR_INVALID;
+			pr_err("Unable to create shm bridge, ret: %d\n", ret);
+			goto out;
+		}
+
+		if (!find_mem_obj_locked(mem_obj->mem_region_id,
+				SMCINVOKE_MEM_RGN_OBJ)) {
+			pr_err("Memory object not found\n");
+			return OBJECT_ERROR_BADOBJ;
+		}
+
+		mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked();
+	}
+
+out:
+	if (ret != OBJECT_OK)
+		kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
+	return ret;
+}
+
+static int create_mem_obj(struct dma_buf *dma_buf, int32_t *tzhandle,
+			struct smcinvoke_mem_obj **mem_obj, int32_t server_id, int32_t user_handle)
+{
+	struct smcinvoke_mem_obj *t_mem_obj = NULL;
+	struct smcinvoke_server_info *server_i = NULL;
+
+	t_mem_obj = kzalloc(sizeof(struct smcinvoke_mem_obj), GFP_KERNEL);
+	if (!t_mem_obj) {
+		dma_buf_put(dma_buf);
+		return -ENOMEM;
+	}
+	server_i = kzalloc(sizeof(struct smcinvoke_server_info), GFP_KERNEL);
+	if (!server_i) {
+		kfree(t_mem_obj);
+		dma_buf_put(dma_buf);
+		return -ENOMEM;
+	}
+	kref_init(&t_mem_obj->mem_regn_ref_cnt);
+	t_mem_obj->dma_buf = dma_buf;
+	mutex_lock(&g_smcinvoke_lock);
+	t_mem_obj->mem_region_id = next_mem_region_obj_id_locked();
+	server_i->server_id = server_id;
+	t_mem_obj->server = server_i;
+	t_mem_obj->mem_obj_user_fd = user_handle;
+	list_add_tail(&t_mem_obj->list, &g_mem_objs);
+	mutex_unlock(&g_smcinvoke_lock);
+	*mem_obj = t_mem_obj;
+	*tzhandle = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID,
+			t_mem_obj->mem_region_id);
+	return 0;
+}
+
+/*
+ * This function retrieves the file pointer corresponding to the FD provided.
+ * It stores the retrieved file pointer until the IOCTL call is concluded.
+ * Once the call is completed, all stored file pointers are released. File
+ * pointers are stored to prevent other threads from releasing that FD while
+ * the IOCTL is in progress.
+ */
+static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd,
+		struct file **filp, uint32_t *tzhandle, struct list_head *l_pending_mem_obj)
+{
+	int ret = -EBADF;
+	uint16_t server_id = 0;
+	struct smcinvoke_mem_obj *mem_obj = NULL;
+
+	if (UHANDLE_IS_NULL(uhandle)) {
+		*tzhandle = SMCINVOKE_TZ_OBJ_NULL;
+		ret = 0;
+	} else if (UHANDLE_IS_CB_OBJ(uhandle)) {
+		server_id = get_server_id(server_fd);
+		if (server_id < CBOBJ_SERVER_ID_START)
+			goto out;
+
+		mutex_lock(&g_smcinvoke_lock);
+		ret = get_pending_cbobj_locked(server_id,
+					UHANDLE_GET_CB_OBJ(uhandle));
+		mutex_unlock(&g_smcinvoke_lock);
+		if (ret)
+			goto out;
+		*tzhandle = TZHANDLE_MAKE_LOCAL(server_id,
+				UHANDLE_GET_CB_OBJ(uhandle));
+		ret = 0;
+	} else if (UHANDLE_IS_FD(uhandle)) {
+		struct dma_buf *dma_buf = NULL;
+		struct smcinvoke_file_data *tzobj = NULL;
+
+		if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) {
+			server_id = get_server_id(server_fd);
+			ret = create_mem_obj(dma_buf, tzhandle, &mem_obj, server_id, uhandle);
+			if (!ret && mem_obj_async_support && l_pending_mem_obj) {
+				mutex_lock(&g_smcinvoke_lock);
+				/* Map the newly created memory object and add it
+				 * to l_pending_mem_obj list.
+				 * Before returning to TZ, add the mapping data
+				 * to the async side channel so it's available to TZ
+				 * together with the memory object.
+				 */
+				if (!smcinvoke_map_mem_region_locked(mem_obj)) {
+					queue_mem_obj_pending_async_locked(mem_obj, l_pending_mem_obj);
+				} else {
+					pr_err("Failed to map memory region\n");
+				}
+				mutex_unlock(&g_smcinvoke_lock);
+			}
+
+		} else if (is_remote_obj(UHANDLE_GET_FD(uhandle),
+				&tzobj, filp)) {
+			*tzhandle = tzobj->tzhandle;
+			ret = 0;
+		}
+	}
+out:
+	return ret;
+}
+
+static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd)
+{
+	int unused_fd = -1, ret = -EINVAL;
+	struct file *f = NULL;
+	struct smcinvoke_file_data *cxt = NULL;
+
+	cxt = kzalloc(sizeof(*cxt), GFP_KERNEL);
+	if (!cxt) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ ||
+		obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
+		cxt->context_type = obj_type;
+		cxt->tzhandle = obj;
+	} else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) {
+		cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER;
+		cxt->server_id = obj;
+	} else {
+		goto out;
+	}
+
+	unused_fd = get_unused_fd_flags(O_RDWR);
+	if (unused_fd < 0)
+		goto out;
+
+	if (fd == NULL)
+		goto out;
+
+	f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR);
+	if (IS_ERR(f))
+		goto out;
+
+	*fd = unused_fd;
+	fd_install(*fd, f);
+	return 0;
+out:
+	if (unused_fd >= 0)
+		put_unused_fd(unused_fd);
+	kfree(cxt);
+
+	return ret;
+}
+
+static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id,
+		int32_t *uhandle, bool lock, uint32_t context_type)
+{
+	int ret = -1;
+
+	if (TZHANDLE_IS_NULL(tzhandle)) {
+		*uhandle = UHANDLE_NULL;
+		ret = 0;
+	} else if (TZHANDLE_IS_CB_OBJ(tzhandle)) {
+		if (srvr_id != TZHANDLE_GET_SERVER(tzhandle))
+			goto out;
+		*uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle));
+		MUTEX_LOCK(lock)
+		ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle),
+				TZHANDLE_GET_OBJID(tzhandle));
+		MUTEX_UNLOCK(lock)
+	} else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) {
+		struct smcinvoke_mem_obj *mem_obj = NULL;
+
+		MUTEX_LOCK(lock)
+		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle),
+				SMCINVOKE_MEM_RGN_OBJ);
+
+		if (mem_obj != NULL) {
+			int fd;
+
+			fd = mem_obj->mem_obj_user_fd;
+			if (fd < 0)
+				goto exit_lock;
+			*uhandle = fd;
+			ret = 0;
+		}
+exit_lock:
+		MUTEX_UNLOCK(lock)
+	} else if (TZHANDLE_IS_REMOTE(tzhandle)) {
+		/* if execution comes here => tzhandle is an unsigned int */
+		ret = get_fd_for_obj(context_type,
+				(uint32_t)tzhandle, uhandle);
+	}
+out:
+	return ret;
+}
+
+static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len)
+{
+	struct smcinvoke_tzcb_req *msg = buf;
+
+	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) {
+		pr_err("Invalid object count in %s\n", __func__);
+		return OBJECT_ERROR_INVALID;
+	}
+
+	trace_release_mem_obj_locked(msg->hdr.tzhandle, buf_len);
+
+	return release_tzhandle_locked(msg->hdr.tzhandle);
+}
+
+static int32_t smcinvoke_process_map_mem_region_req(void *buf, size_t buf_len)
+{
+	int ret = OBJECT_OK;
+	struct smcinvoke_tzcb_req *msg = buf;
+	struct {
+		uint64_t p_addr;
+		uint64_t len;
+		uint32_t perms;
+	} *ob = NULL;
+	int32_t *oo = NULL;
+	struct smcinvoke_mem_obj *mem_obj = NULL;
+
+	if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) ||
+			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
+		pr_err("Invalid counts received for mapping mem obj\n");
+		return OBJECT_ERROR_INVALID;
+	}
+	/* args[0] = BO, args[1] = OI, args[2] = OO */
+	ob = buf + msg->args[0].b.offset;
+	oo = &msg->args[2].handle;
+
+	mutex_lock(&g_smcinvoke_lock);
+	mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle),
+			SMCINVOKE_MEM_RGN_OBJ);
+	if (!mem_obj) {
+		mutex_unlock(&g_smcinvoke_lock);
+		pr_err("Memory object not found\n");
+		return OBJECT_ERROR_BADOBJ;
+	}
+
+	if (!mem_obj->p_addr) {
+		ret = smcinvoke_map_mem_region_locked(mem_obj);
+	} else {
+		kref_get(&mem_obj->mem_map_obj_ref_cnt);
+	}
+
+	if (!ret) {
+		ob->p_addr = mem_obj->p_addr;
+		ob->len = mem_obj->p_addr_len;
+		ob->perms = SMCINVOKE_MEM_PERM_RW;
+		*oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id);
+	}
+
+	mutex_unlock(&g_smcinvoke_lock);
+
+	return ret;
+}
+
+static int32_t smcinvoke_sleep(void *buf, size_t buf_len)
+{
+	struct smcinvoke_tzcb_req *msg = buf;
+	uint32_t sleepTimeMs_val = 0;
+
+	if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) ||
+			(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
+		pr_err("Invalid counts received for sleeping in HLOS\n");
+		return OBJECT_ERROR_INVALID;
+	}
+
+	/* Time in milliseconds is expected from TZ */
+	sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset));
+	msleep(sleepTimeMs_val);
+	return OBJECT_OK;
+}
+
+static void process_kernel_obj(void *buf, size_t buf_len)
+{
+	struct smcinvoke_tzcb_req *cb_req = buf;
+
+	switch (cb_req->hdr.op) {
+	case OBJECT_OP_MAP_REGION:
+		if (mem_obj_async_support) {
+			/* Mapping requests are not supposed to come
+			 * from TZ once memory object async support
+			 * is enabled.
+			 * If they are still coming, we would like to
+			 * know about it.
+			 */
+			pr_info("Received a request to map memory region\n");
+		}
+		cb_req->result = smcinvoke_process_map_mem_region_req(buf, buf_len);
+		break;
+	case OBJECT_OP_YIELD:
+		cb_req->result = OBJECT_OK;
+		break;
+	case OBJECT_OP_SLEEP:
+		cb_req->result = smcinvoke_sleep(buf, buf_len);
+		break;
+	default:
+		pr_err("invalid operation for TZ kernel object\n");
+		cb_req->result = OBJECT_ERROR_INVALID;
+		break;
+	}
+}
+
+static void process_mem_obj(void *buf, size_t buf_len)
+{
+	struct smcinvoke_tzcb_req *cb_req = buf;
+
+	mutex_lock(&g_smcinvoke_lock);
+	cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ?
+			smcinvoke_release_mem_obj_locked(buf, buf_len) :
+			OBJECT_ERROR_INVALID;
+	mutex_unlock(&g_smcinvoke_lock);
+}
+
+static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len,
+		uint8_t *out_buf, phys_addr_t out_paddr,
+		size_t out_buf_len, int32_t *result, u64 *response_type,
+		unsigned int *data, struct qtee_shm *in_shm,
+		struct qtee_shm *out_shm)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case SMCINVOKE_INVOKE_CMD_LEGACY:
+		qtee_shmbridge_flush_shm_buf(in_shm);
+		qtee_shmbridge_flush_shm_buf(out_shm);
+		ret = qcom_scm_invoke_smc_legacy(in_paddr, in_buf_len, out_paddr, out_buf_len,
+				result, response_type, data);
+		qtee_shmbridge_inv_shm_buf(in_shm);
+		qtee_shmbridge_inv_shm_buf(out_shm);
+		break;
+
+	case SMCINVOKE_INVOKE_CMD:
+		ret = qcom_scm_invoke_smc(in_paddr, in_buf_len, out_paddr, out_buf_len,
+				result, response_type, data);
+		break;
+
+	case SMCINVOKE_CB_RSP_CMD:
+		if (legacy_smc_call)
+			qtee_shmbridge_flush_shm_buf(out_shm);
+		ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len,
+				result, response_type, data);
+		if (legacy_smc_call) {
+			qtee_shmbridge_inv_shm_buf(in_shm);
+			qtee_shmbridge_inv_shm_buf(out_shm);
+		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	trace_invoke_cmd_handler(cmd, *response_type, *result, ret);
+	return ret;
+}
+/*
+ * Buf should be aligned to struct smcinvoke_tzcb_req
+ */
+static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
+{
+	/* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
+	int ret = OBJECT_ERROR_DEFUNCT;
+	int cbobj_retries = 0;
+	long timeout_jiff;
+	bool wait_interrupted = false;
+	struct smcinvoke_cb_txn *cb_txn = NULL;
+	struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
+	struct smcinvoke_server_info *srvr_info = NULL;
+	struct smcinvoke_mem_obj *mem_obj = NULL;
+	uint16_t server_id = 0;
+
+	if (buf_len < sizeof(struct smcinvoke_tzcb_req)) {
+		pr_err("buffer length too small: %zu\n", buf_len);
+		return;
+	}
+
+	cb_req = buf;
+
+	/* check whether it is to be served by kernel or userspace */
+	if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
+		return process_kernel_obj(buf, buf_len);
+	} else if (TZHANDLE_IS_MEM_MAP_OBJ(cb_req->hdr.tzhandle)) {
+		/*
+		 * MEM_MAP memory object is created and owned by kernel,
+		 * hence its processing(handling deletion) is done in
+		 * kernel context.
+		 */
+		return process_mem_obj(buf, buf_len);
+	} else if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
+		/*
+		 * MEM_RGN memory objects are created and owned by userspace,
+		 * and hence their deletion/handling requires going back to
+		 * userspace, similar to callback objects. If we enter this
+		 * 'else if' branch, it is a no-op here, and we proceed as in
+		 * the callback-object case.
+		 */
+	} else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) {
+		pr_err("Request object is not a callback object %x\n",
+			cb_req->hdr.tzhandle);
+		cb_req->result = OBJECT_ERROR_INVALID;
+		return;
+	}
+
+	/*
+	 * We need a copy of the req that can be sent to the server. Otherwise,
+	 * if someone kills the invoke caller, buf would go away and the server
+	 * would be working on an already freed buffer, causing a device crash.
+	 */
+	tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
+	if (!tmp_cb_req) {
+		/* we need to return error to caller so fill up result */
+		cb_req->result = OBJECT_ERROR_KMEM;
+		pr_err("failed to create copy of request, set result: %d\n",
+				cb_req->result);
+		return;
+	}
+
+	cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
+	if (!cb_txn) {
+		cb_req->result = OBJECT_ERROR_KMEM;
+		pr_err("failed to allocate memory for request, result: %d\n",
+				cb_req->result);
+		kfree(tmp_cb_req);
+		return;
+	}
+	/* no need for memcpy as we did kmemdup() above */
+	cb_req = tmp_cb_req;
+
+	trace_process_tzcb_req_handle(cb_req->hdr.tzhandle, cb_req->hdr.op, cb_req->hdr.counts);
+
+	cb_txn->state = SMCINVOKE_REQ_PLACED;
+	cb_txn->cb_req = cb_req;
+	cb_txn->cb_req_bytes = buf_len;
+	cb_txn->filp_to_release = arr_filp;
+	kref_init(&cb_txn->ref_cnt);
+
+	mutex_lock(&g_smcinvoke_lock);
+	++cb_reqs_inflight;
+
+	if (TZHANDLE_IS_MEM_RGN_OBJ(cb_req->hdr.tzhandle)) {
+		mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(cb_req->hdr.tzhandle),
+				SMCINVOKE_MEM_RGN_OBJ);
+		if (!mem_obj) {
+			pr_err("mem obj with tzhandle: %d not found\n",
+					cb_req->hdr.tzhandle);
+			mutex_unlock(&g_smcinvoke_lock);
+			goto out;
+		}
+		server_id = mem_obj->server->server_id;
+	} else {
+		server_id = TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle);
+	}
+
+	srvr_info = get_cb_server_locked(server_id);
+	if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+		/* ret equals OBJECT_ERROR_DEFUNCT at this point, so go to out */
+		if (!srvr_info)
+			pr_err("server is invalid\n");
+		else {
+			pr_err("server is defunct, state= %d tzhandle = %d\n",
+					srvr_info->state, cb_req->hdr.tzhandle);
+		}
+		mutex_unlock(&g_smcinvoke_lock);
+		goto out;
+	}
+
+	cb_txn->txn_id = ++srvr_info->txn_id;
+	hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
+	mutex_unlock(&g_smcinvoke_lock);
+
+	trace_process_tzcb_req_wait(cb_req->hdr.tzhandle, cbobj_retries, cb_txn->txn_id,
+			current->pid, current->tgid, srvr_info->state, srvr_info->server_id,
+			cb_reqs_inflight);
+	/*
+	 * we need not worry that server_info will be deleted because as long
+	 * as this CBObj is served by this server, srvr_info will be valid.
+	 */
+	wake_up_interruptible_all(&srvr_info->req_wait_q);
+	/* Time out well before 1 s, otherwise TZ would report busy */
+	timeout_jiff = msecs_to_jiffies(100);
+
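+	/*
+	 * Wait for the server's response in 100 ms slices, up to
+	 * CBOBJ_MAX_RETRIES times. While the serving accept thread is marked
+	 * suspended, the retry counter is reset, so the invoke thread
+	 * effectively waits indefinitely for the server to come back.
+	 */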
+	while (cbobj_retries < CBOBJ_MAX_RETRIES) {
+		if (wait_interrupted) {
+			ret = wait_event_timeout(srvr_info->rsp_wait_q,
+					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
+					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
+					timeout_jiff);
+		} else {
+			ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q,
+					(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
+					(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT),
+					timeout_jiff);
+		}
+		if (ret == 0) {
+			if (srvr_info->is_server_suspended == 0) {
+				tzcb_err_ratelimited("CBobj timed out waiting on cbtxn:%d, cb-tzhandle:%d, retry:%d, op:%d, counts:%d\n",
+						cb_txn->txn_id, cb_req->hdr.tzhandle, cbobj_retries,
+						cb_req->hdr.op, cb_req->hdr.counts);
+				tzcb_err_ratelimited("CBobj %d timed out, pid %x, tid %x, srvr state=%d, srvr id:%u\n",
+						cb_req->hdr.tzhandle, current->pid,
+						current->tgid, srvr_info->state,
+						srvr_info->server_id);
+			}
+		} else {
+			/* wait_event returned due to a signal */
+			if (srvr_info->state != SMCINVOKE_SERVER_STATE_DEFUNCT &&
+					cb_txn->state != SMCINVOKE_REQ_PROCESSED) {
+				wait_interrupted = true;
+			} else {
+				break;
+			}
+		}
+		/*
+		 * If bit corresponding to any accept thread is set, invoke threads
+		 * should wait infinitely for the accept thread to come back with
+		 * response.
+		 */
+		if (srvr_info->is_server_suspended > 0) {
+			cbobj_retries = 0;
+		} else {
+			cbobj_retries++;
+		}
+	}
+
+out:
+	/*
+	 * We could be here because of one of the following:
+	 * a. Req is PROCESSED
+	 * b. Server was killed
+	 * c. Invoke thread is killed
+	 * Sometimes the invoke thread and the server are part of the same process.
+	 */
+	mutex_lock(&g_smcinvoke_lock);
+	hash_del(&cb_txn->hash);
+	if (ret == 0) {
+		pr_err("CBObj timed out! No more retries\n");
+		cb_req->result = Object_ERROR_TIMEOUT;
+	} else if (ret == -ERESTARTSYS) {
+		pr_err("wait event interrupted, ret: %d\n", ret);
+		cb_req->result = OBJECT_ERROR_ABORT;
+	} else {
+		if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
+			/*
+			 * it is possible that server was killed immediately
+			 * after CB Req was processed but who cares now!
+			 */
+		} else if (!srvr_info ||
+			srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+			cb_req->result = OBJECT_ERROR_DEFUNCT;
+			pr_err("server invalid, res: %d\n", cb_req->result);
+		} else {
+			pr_err("%s: unexpected event happened, ret:%d\n", __func__, ret);
+			cb_req->result = OBJECT_ERROR_ABORT;
+		}
+	}
+	--cb_reqs_inflight;
+
+	trace_process_tzcb_req_result(cb_req->result, cb_req->hdr.tzhandle, cb_req->hdr.op,
+			cb_req->hdr.counts, cb_reqs_inflight);
+
+	memcpy(buf, cb_req, buf_len);
+
+	kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
+	if (srvr_info)
+		kref_put(&srvr_info->ref_cnt, destroy_cb_server);
+	mutex_unlock(&g_smcinvoke_lock);
+}
+
+static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size,
+		struct smcinvoke_cmd_req *req,
+		union smcinvoke_arg *args_buf,
+		uint32_t context_type)
+{
+	int ret = -EINVAL, i = 0;
+	int32_t temp_fd = UHANDLE_NULL;
+	union smcinvoke_tz_args *tz_args = NULL;
+	size_t offset = sizeof(struct smcinvoke_msg_hdr) +
+			OBJECT_COUNTS_TOTAL(req->counts) *
+			sizeof(union smcinvoke_tz_args);
+
+	if (offset > buf_size)
+		goto out;
+
+	tz_args = (union smcinvoke_tz_args *)
+			(buf + sizeof(struct smcinvoke_msg_hdr));
+
+	tz_args += OBJECT_COUNTS_NUM_BI(req->counts);
+
+	if (args_buf == NULL)
+		return 0;
+
+	FOR_ARGS(i, req->counts, BO) {
+		args_buf[i].b.size = tz_args->b.size;
+		if ((buf_size - tz_args->b.offset < tz_args->b.size) ||
+				tz_args->b.offset > buf_size) {
+			pr_err("%s: buffer overflow detected\n", __func__);
+			goto out;
+		}
+		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
+			if (copy_to_user((void __user *)
+					(uintptr_t)(args_buf[i].b.addr),
+					(uint8_t *)(buf) + tz_args->b.offset,
+					tz_args->b.size)) {
+				pr_err("Error %d copying ctxt to user\n", ret);
+				goto out;
+			}
+		} else {
+			memcpy((uint8_t *)(args_buf[i].b.addr),
+					(uint8_t *)(buf) + tz_args->b.offset,
+					tz_args->b.size);
+		}
+		tz_args++;
+	}
+	tz_args += OBJECT_COUNTS_NUM_OI(req->counts);
+
+	FOR_ARGS(i, req->counts, OO) {
+		/*
+		 * create a new FD and assign to output object's context.
+		 * We are passing cb_server_fd from output param in case OO
+		 * is a CBObj. For CBObj, we have to ensure that it is sent
+		 * to server who serves it and that info comes from USpace.
+		 */
+		temp_fd = UHANDLE_NULL;
+
+		ret = get_uhandle_from_tzhandle(tz_args->handle,
+				TZHANDLE_GET_SERVER(tz_args->handle),
+				&temp_fd, NO_LOCK, context_type);
+
+		args_buf[i].o.fd = temp_fd;
+
+		if (ret)
+			goto out;
+
+		trace_marshal_out_invoke_req(i, tz_args->handle,
+				TZHANDLE_GET_SERVER(tz_args->handle), temp_fd);
+
+		tz_args++;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+static bool is_inbound_req(int val)
+{
+	return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED ||
+		val == QSEOS_RESULT_INCOMPLETE ||
+		val == QSEOS_RESULT_BLOCKED_ON_LISTENER);
+}
+
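+/*
+ * Locate the piggyback region in a callback out-buffer - past the last BI/BO
+ * buffer (or right after the header and args if there are none), aligned to
+ * SMCINVOKE_ARGS_ALIGN_SIZE - and hand it to process_piggyback_data() so that
+ * any piggybacked object releases are processed.
+ */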
+static void process_piggyback_cb_data(uint8_t *outbuf, size_t buf_len)
+{
+	struct smcinvoke_tzcb_req *msg = NULL;
+	uint32_t max_offset = 0;
+	uint32_t buffer_size_max_offset = 0;
+	void *piggyback_buf = NULL;
+	size_t piggyback_buf_size;
+	size_t piggyback_offset = 0;
+	int i = 0;
+
+	if (outbuf == NULL) {
+		pr_err("%s: outbuf is NULL\n", __func__);
+		return;
+	}
+
+	msg = (void *) outbuf;
+	if ((buf_len < msg->args[0].b.offset) ||
+		(buf_len - msg->args[0].b.offset < msg->args[0].b.size)) {
+		pr_err("%s: invalid buffer length or argument offset\n", __func__);
+		return;
+	}
+
+	FOR_ARGS(i, msg->hdr.counts, BI)
+	{
+		if (msg->args[i].b.offset > max_offset) {
+			max_offset = msg->args[i].b.offset;
+			buffer_size_max_offset = msg->args[i].b.size;
+		}
+	}
+
+	FOR_ARGS(i, msg->hdr.counts, BO)
+	{
+		if (msg->args[i].b.offset > max_offset) {
+			max_offset = msg->args[i].b.offset;
+			buffer_size_max_offset = msg->args[i].b.size;
+		}
+	}
+
+	// Compute the offset just past the end of the BI and BO buffers
+	if (max_offset)
+		piggyback_offset = max_offset + buffer_size_max_offset;
+	else
+		piggyback_offset = TZCB_BUF_OFFSET(msg);
+
+	piggyback_offset = size_align(piggyback_offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+	// Jump to the piggyback data offset
+	piggyback_buf = (uint8_t *)msg + piggyback_offset;
+	piggyback_buf_size = g_max_cb_buf_size - piggyback_offset;
+
+	process_piggyback_data(piggyback_buf, piggyback_buf_size);
+}
+
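+/*
+ * Send the marshalled invoke request to TZ and loop on the response:
+ * legacy qseecom listener results are forwarded to the qseecom listener
+ * handler, and SMCINVOKE_RESULT_INBOUND_REQ_NEEDED responses are handled by
+ * processing the callback request and replying with SMCINVOKE_CB_RSP_CMD.
+ * -EBUSY from the SCM layer is retried (when 'retry' is set) after a delay.
+ */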
+static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr,
+		size_t in_buf_len,
+		uint8_t *out_buf, phys_addr_t out_paddr,
+		size_t out_buf_len,
+		struct smcinvoke_cmd_req *req,
+		union smcinvoke_arg *args_buf,
+		bool *tz_acked, uint32_t context_type,
+		struct qtee_shm *in_shm, struct qtee_shm *out_shm,
+		bool retry)
+{
+	int ret = 0, cmd, retry_count = 0;
+	u64 response_type;
+	unsigned int data;
+	struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL};
+
+	*tz_acked = false;
+	/* buf size should be page aligned */
+	if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0)
+		return -EINVAL;
+
+	cmd = invoke_cmd;
+
+	while (1) {
+		do {
+			ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf,
+					out_paddr, out_buf_len, &req->result,
+					&response_type, &data, in_shm, out_shm);
+
+			if (ret == -EBUSY) {
+				pr_err_ratelimited("Secure side is busy, will retry after 30 ms, retry_count = %d\n", retry_count);
+				msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS);
+			}
+
+		} while (retry && (ret == -EBUSY) &&
+				(retry_count++ < SMCINVOKE_SCM_EBUSY_MAX_RETRY));
+
+		if (!ret && !is_inbound_req(response_type)) {
+			/* don't marshal if the object returned an error */
+			if (!req->result) {
+				if (args_buf != NULL)
+					ret = marshal_out_invoke_req(in_buf,
+							in_buf_len, req, args_buf,
+							context_type);
+			}
+			*tz_acked = true;
+		}
+
+		if (cmd == SMCINVOKE_CB_RSP_CMD)
+			release_filp(arr_filp, OBJECT_COUNTS_MAX_OO);
+
+		if (ret || !is_inbound_req(response_type))
+			break;
+
+		/* process listener request */
+		if (response_type == QSEOS_RESULT_INCOMPLETE ||
+				response_type == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
+			ret = qseecom_process_listener_from_smcinvoke(
+					&req->result, &response_type, &data);
+
+			trace_prepare_send_scm_msg(response_type, req->result);
+
+			if (!req->result &&
+			response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
+				ret = marshal_out_invoke_req(in_buf,
+						in_buf_len, req, args_buf,
+						context_type);
+			}
+			*tz_acked = true;
+		}
+
+		/*
+		 * qseecom does not understand smcinvoke's callback object and
+		 * erroneously sets the ret value to -EINVAL. We need to handle it.
+		 */
+		if (response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED)
+			break;
+
+		if (response_type == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) {
+			trace_status(__func__, "looks like inbnd req reqd");
+			process_piggyback_cb_data(out_buf, out_buf_len);
+			process_tzcb_req(out_buf, out_buf_len, arr_filp);
+			cmd = SMCINVOKE_CB_RSP_CMD;
+		}
+	}
+	return ret;
+}
+/*
+ * SMC expects arguments in following format
+ * ---------------------------------------------------------------------------
+ * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload |
+ * ---------------------------------------------------------------------------
+ * cxt: target, op: operation, counts: total arguments
+ * offset: offset is from beginning of buffer i.e. cxt
+ * size: size is 8 bytes aligned value
+ */
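+/*
+ * Worked example (illustrative; exact numbers depend on the struct sizes in
+ * this driver's headers): for a request with one 100-byte input buffer and
+ * one 10-byte output buffer, compute_in_msg_size() adds the header plus two
+ * argument slots, aligns that to 8 bytes, then adds ALIGN(100, 8) and
+ * ALIGN(10, 8) for the buffer payloads, and finally rounds the total up to
+ * PAGE_SIZE, since the SCM call requires page-aligned buffers.
+ */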
+static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req,
+		const union smcinvoke_arg *args_buf)
+{
+	uint32_t i = 0;
+
+	size_t total_size = sizeof(struct smcinvoke_msg_hdr) +
+			OBJECT_COUNTS_TOTAL(req->counts) *
+			sizeof(union smcinvoke_tz_args);
+
+	/* Computed total_size should be 8 bytes aligned from start of buf */
+	total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE);
+
+	/* each buffer has to be 8 bytes aligned */
+	while (i < OBJECT_COUNTS_NUM_buffers(req->counts))
+		total_size = size_add_(total_size,
+				size_align(args_buf[i++].b.size,
+				SMCINVOKE_ARGS_ALIGN_SIZE));
+
+	return PAGE_ALIGN(total_size);
+}
+
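+/*
+ * Lay out an invoke request in the shared in-buffer: write the message
+ * header, copy BI buffers in (from user or kernel memory depending on
+ * context_type), reserve space for BO buffers, and convert OI objects'
+ * uhandles into tzhandles, recording them so they can be released on
+ * failure.
+ */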
+static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req,
+		const union smcinvoke_arg *args_buf, uint32_t tzhandle,
+		uint8_t *buf, size_t buf_size, struct file **arr_filp,
+		int32_t *tzhandles_to_release, uint32_t context_type,
+		struct list_head *l_pending_mem_obj)
+{
+	int ret = -EINVAL, i = 0, j = 0, k = 0;
+	const struct smcinvoke_msg_hdr msg_hdr = {
+			tzhandle, req->op, req->counts};
+	uint32_t offset = sizeof(struct smcinvoke_msg_hdr) +
+			sizeof(union smcinvoke_tz_args) *
+			OBJECT_COUNTS_TOTAL(req->counts);
+	union smcinvoke_tz_args *tz_args = NULL;
+
+	if (buf_size < offset)
+		goto out;
+
+	*(struct smcinvoke_msg_hdr *)buf = msg_hdr;
+	tz_args = (union smcinvoke_tz_args *)(buf +
+			sizeof(struct smcinvoke_msg_hdr));
+
+	if (args_buf == NULL)
+		return 0;
+
+	FOR_ARGS(i, req->counts, BI) {
+		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+		if ((offset > buf_size) ||
+			(args_buf[i].b.size > (buf_size - offset)))
+			goto out;
+
+		tz_args[i].b.offset = offset;
+		tz_args[i].b.size = args_buf[i].b.size;
+		if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
+			if (copy_from_user(buf + offset,
+					(void __user *)(uintptr_t)(args_buf[i].b.addr),
+					args_buf[i].b.size))
+				goto out;
+		} else {
+			memcpy(buf + offset, (void *)(args_buf[i].b.addr),
+					args_buf[i].b.size);
+		}
+		offset += args_buf[i].b.size;
+	}
+	FOR_ARGS(i, req->counts, BO) {
+		offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+		if ((offset > buf_size) ||
+				(args_buf[i].b.size > (buf_size - offset)))
+			goto out;
+
+		tz_args[i].b.offset = offset;
+		tz_args[i].b.size = args_buf[i].b.size;
+		offset += args_buf[i].b.size;
+	}
+	FOR_ARGS(i, req->counts, OI) {
+		ret = get_tzhandle_from_uhandle(args_buf[i].o.fd,
+				args_buf[i].o.cb_server_fd, &arr_filp[j++],
+				&(tz_args[i].handle), l_pending_mem_obj);
+		if (ret)
+			goto out;
+
+		trace_marshal_in_invoke_req(i, args_buf[i].o.fd,
+				args_buf[i].o.cb_server_fd, tz_args[i].handle);
+
+		tzhandles_to_release[k++] = tz_args[i].handle;
+	}
+	ret = 0;
+out:
+	return ret;
+}
+
+static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn,
+				struct smcinvoke_accept *user_req, int srvr_id)
+{
+	int ret = 0, i = 0;
+	int32_t temp_fd = UHANDLE_NULL;
+	union smcinvoke_arg tmp_arg;
+	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
+	union smcinvoke_tz_args *tz_args = tzcb_req->args;
+	size_t tzcb_req_len = cb_txn->cb_req_bytes;
+	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
+	size_t user_req_buf_offset = sizeof(union smcinvoke_arg) *
+			OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts);
+
+	if (tz_buf_offset > tzcb_req_len) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	user_req->txn_id = cb_txn->txn_id;
+	if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id,
+			(int32_t*)(&user_req->cbobj_id), TAKE_LOCK,
+			SMCINVOKE_OBJ_TYPE_TZ_OBJ)) {
+		ret = -EINVAL;
+		goto out;
+	}
+	user_req->op = tzcb_req->hdr.op;
+	user_req->counts = tzcb_req->hdr.counts;
+	user_req->argsize = sizeof(union smcinvoke_arg);
+
+	trace_marshal_in_tzcb_req_handle(tzcb_req->hdr.tzhandle, srvr_id,
+			user_req->cbobj_id, user_req->op, user_req->counts);
+
+	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
+		user_req_buf_offset = size_align(user_req_buf_offset,
+				SMCINVOKE_ARGS_ALIGN_SIZE);
+		tmp_arg.b.size = tz_args[i].b.size;
+		if ((tz_args[i].b.offset > tzcb_req_len) ||
+				(tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) ||
+				(user_req_buf_offset > user_req->buf_len) ||
+				(tmp_arg.b.size >
+				user_req->buf_len - user_req_buf_offset)) {
+			ret = -EINVAL;
+			pr_err("%s: buffer overflow detected\n", __func__);
+			goto out;
+		}
+		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;
+
+		if (copy_to_user(u64_to_user_ptr
+				(user_req->buf_addr + i * sizeof(tmp_arg)),
+				&tmp_arg, sizeof(tmp_arg)) ||
+				copy_to_user(u64_to_user_ptr(tmp_arg.b.addr),
+				(uint8_t *)(tzcb_req) + tz_args[i].b.offset,
+				tz_args[i].b.size)) {
+			ret = -EFAULT;
+			goto out;
+		}
+		user_req_buf_offset += tmp_arg.b.size;
+	}
+	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
+		user_req_buf_offset = size_align(user_req_buf_offset,
+				SMCINVOKE_ARGS_ALIGN_SIZE);
+
+		tmp_arg.b.size = tz_args[i].b.size;
+		if ((user_req_buf_offset > user_req->buf_len) ||
+				(tmp_arg.b.size >
+				user_req->buf_len - user_req_buf_offset)) {
+			ret = -EINVAL;
+			pr_err("%s: buffer overflow detected\n", __func__);
+			goto out;
+		}
+		tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset;
+
+		if (copy_to_user(u64_to_user_ptr
+				(user_req->buf_addr + i * sizeof(tmp_arg)),
+				&tmp_arg, sizeof(tmp_arg))) {
+			ret = -EFAULT;
+			goto out;
+		}
+		user_req_buf_offset += tmp_arg.b.size;
+	}
+	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
+		/*
+		 * create a new FD and assign to output object's
+		 * context
+		 */
+		temp_fd = UHANDLE_NULL;
+
+		ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id,
+				&temp_fd, TAKE_LOCK, SMCINVOKE_OBJ_TYPE_TZ_OBJ);
+
+		tmp_arg.o.fd = temp_fd;
+
+		if (ret) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (copy_to_user(u64_to_user_ptr
+				(user_req->buf_addr + i * sizeof(tmp_arg)),
+				&tmp_arg, sizeof(tmp_arg))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		trace_marshal_in_tzcb_req_fd(i, tz_args[i].handle, srvr_id, temp_fd);
+	}
+out:
+	return ret;
+}
+
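+/*
+ * Copy the server's response for a callback request back into the TZ buffer:
+ * BO buffer contents are copied in from userspace, OO objects are converted
+ * to tzhandles, and any newly created memory objects are published to TZ via
+ * the async side channel when it is supported.
+ */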
+static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req,
+		struct smcinvoke_cb_txn *cb_txn,
+		struct file **arr_filp)
+{
+	int ret = -EINVAL, i = 0;
+	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
+	struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req;
+	union smcinvoke_tz_args *tz_args = tzcb_req->args;
+	size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req);
+	LIST_HEAD(l_mem_objs_pending_async);    /* Holds new memory objects, to be later sent to TZ */
+	uint32_t max_offset = 0;
+	uint32_t buffer_size_max_offset = 0;
+	void* async_buf_begin;
+	size_t async_buf_size;
+	uint32_t offset = 0;
+
+	/*
+	 * We assume 'marshal_in_tzcb_req' increases the ref-count of the CBObjs.
+	 * That should be the case for mem-objs as well; however, it does not do so.
+	 * It is easier to filter out the 'release_tzhandles' for mem-objs here than
+	 * to increase their ref-count in 'marshal_in_tzcb_req', because there is no
+	 * reliable error handling and cleanup in 'marshal_in_tzcb_req'; so if it
+	 * fails, the mem-obj may not get released.
+	 */
+
+	if (!TZHANDLE_IS_MEM_OBJ(cb_txn->cb_req->hdr.tzhandle))
+		release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
+
+	tzcb_req->result = user_req->result;
+	/*
+	 * Return without marshaling user args if the destination callback
+	 * invocation was unsuccessful.
+	 */
+	if (tzcb_req->result != 0) {
+		ret = 0;
+		goto out;
+	}
+
+	FOR_ARGS(i, tzcb_req->hdr.counts, BI) {
+
+		/* Find the max offset and the size of the buffer in that offset */
+		if (tz_args[i].b.offset > max_offset) {
+			max_offset = tz_args[i].b.offset;
+			buffer_size_max_offset = tz_args[i].b.size;
+		}
+	}
+
+	FOR_ARGS(i, tzcb_req->hdr.counts, BO) {
+		union smcinvoke_arg tmp_arg;
+
+		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
+				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
+				sizeof(union smcinvoke_arg))) {
+			ret = -EFAULT;
+			goto out;
+		}
+		if (tmp_arg.b.size > tz_args[i].b.size)
+			goto out;
+		if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset,
+				u64_to_user_ptr(tmp_arg.b.addr),
+				tmp_arg.b.size)) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		/* Find the max offset and the size of the buffer in that offset */
+		if (tz_args[i].b.offset > max_offset) {
+			max_offset = tz_args[i].b.offset;
+			buffer_size_max_offset = tz_args[i].b.size;
+		}
+	}
+
+	FOR_ARGS(i, tzcb_req->hdr.counts, OO) {
+		union smcinvoke_arg tmp_arg;
+
+		if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr(
+				user_req->buf_addr + i * sizeof(union smcinvoke_arg)),
+				sizeof(union smcinvoke_arg))) {
+			ret = -EFAULT;
+			goto out;
+		}
+		ret = get_tzhandle_from_uhandle(tmp_arg.o.fd,
+				tmp_arg.o.cb_server_fd, &arr_filp[i],
+				&(tz_args[i].handle), &l_mem_objs_pending_async);
+
+		if (ret)
+			goto out;
+		tzhandles_to_release[i] = tz_args[i].handle;
+
+		trace_marshal_out_tzcb_req(i, tmp_arg.o.fd,
+				tmp_arg.o.cb_server_fd, tz_args[i].handle);
+	}
+	ret = 0;
+out:
+	FOR_ARGS(i, tzcb_req->hdr.counts, OI) {
+		if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle))
+			release_tzhandles(&tz_args[i].handle, 1);
+	}
+
+	do {
+		if (mem_obj_async_support) {
+			/*
+			 * We can add the async information to the buffer beyond the
+			 * data at the max offset, if one exists. If it doesn't exist,
+			 * we can add the async information right after the header and
+			 * the args.
+			 */
+			offset = (max_offset ? (max_offset + buffer_size_max_offset) : tz_buf_offset);
+			offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE);
+			async_buf_begin = (uint8_t *)tzcb_req + offset;
+
+			if (async_buf_begin - (void *)tzcb_req > g_max_cb_buf_size) {
+				pr_err("Unable to add memory object info to the async channel\n");
+				break;
+			} else {
+				async_buf_size = g_max_cb_buf_size - (async_buf_begin - (void *)tzcb_req);
+			}
+
+			mutex_lock(&g_smcinvoke_lock);
+			add_mem_obj_info_to_async_side_channel_locked(async_buf_begin,
+					async_buf_size, &l_mem_objs_pending_async);
+			delete_pending_async_list_locked(&l_mem_objs_pending_async);
+			mutex_unlock(&g_smcinvoke_lock);
+		}
+	} while (0);
+
+	if (ret)
+		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
+
+	return ret;
+}
+
+static void set_tz_version(uint32_t tz_version)
+{
+	tz_async_version = tz_version;
+
+	/* We enable async memory object support when TZ async
+	 * version is equal or larger than the driver version.
+	 * It is expected that if the protocol changes in later
+	 * TZ versions, TZ will support backward compatibility
+	 * so this condition should still be valid.
+	 */
+	if (tz_version >= SMCINVOKE_ASYNC_VERSION) {
+		mem_obj_async_support = true;
+		pr_debug("Enabled asynchronous memory object support\n");
+	}
+}
+
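+/*
+ * Handle data piggybacked by QTEE on the out-buffer of an invoke call. The
+ * message carries tzhandles of local objects that QTEE wants released; a
+ * version of 0 means the buffer was not used. Each release is replayed
+ * through process_tzcb_req(), and the buffer is cleared afterwards.
+ */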
+static void process_piggyback_data(void *buf, size_t buf_size)
+{
+	int i;
+	struct smcinvoke_tzcb_req req = {0};
+	struct smcinvoke_piggyback_msg *msg = buf;
+	int32_t *objs = msg->objs;
+
+	if (msg->version == 0) {
+		/* QTEE reset the buffer if it is unused. */
+		return;
+	}
+
+	for (i = 0; i < msg->counts; i++) {
+		if (msg->op != OBJECT_OP_RELEASE) {
+			/* We only support release handler. */
+			break;
+		}
+
+		req.hdr.op = msg->op;
+		req.hdr.counts = 0; /* release op does not require any args */
+		req.hdr.tzhandle = objs[i];
+		if (tz_async_version == 0)
+			set_tz_version(msg->version);
+		process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL);
+		/* cbobjs_in_flight will be adjusted during CB processing */
+	}
+
+	/* Reset output buffer after processing.*/
+	memset(buf, 0, buf_size);
+}
+
+
+/* Add memory object mapped data to the async side channel, so it's available to TZ
+ * together with the memory object.
+ *
+ * No return value as TZ is always able to explicitly ask for this information
+ * in case this function fails.
+ */
+static void add_mem_obj_info_to_async_side_channel_locked(void *buf, size_t buf_size, struct list_head *l_pending_mem_obj)
+{
+	struct smcinvoke_mem_obj_msg *msg = buf;
+	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
+	size_t header_size = 0;
+	size_t mo_size = 0;
+	size_t used = 0;
+	size_t index = 0;
+
+	if (list_empty(l_pending_mem_obj))
+		return;
+
+	header_size = sizeof(struct smcinvoke_mem_obj_msg);
+	mo_size = sizeof(struct smcinvoke_mem_obj_info);
+
+	/* Minimal size required is the header data + one mem obj info */
+	if (buf_size < header_size + mo_size) {
+		pr_err("Unable to add memory object info to async channel\n");
+		return;
+	}
+
+	msg->version = SMCINVOKE_ASYNC_VERSION;
+	msg->op = SMCINVOKE_ASYNC_OP_MEMORY_OBJECT;
+	msg->count = 0;
+
+	used = header_size;
+	index = 0;
+
+	list_for_each_entry(mem_obj_pending, l_pending_mem_obj, list) {
+		if (NULL == mem_obj_pending->mem_obj) {
+			pr_err("Memory object is no longer valid\n");
+			continue;
+		}
+
+		if (used + mo_size > buf_size) {
+			pr_err("Not all memory object info was added to the async channel\n");
+			break;
+		}
+
+		msg->mo[index].memObjRef = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, mem_obj_pending->mem_obj->mem_region_id);
+		msg->mo[index].mapObjRef = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj_pending->mem_obj->mem_map_obj_id);
+		msg->mo[index].addr = mem_obj_pending->mem_obj->p_addr;
+		msg->mo[index].size = mem_obj_pending->mem_obj->p_addr_len;
+		msg->mo[index].perm = SMCINVOKE_MEM_PERM_RW;
+
+		used += sizeof(msg->mo[index]);
+		index++;
+	}
+
+	msg->count = index;
+
+	pr_debug("Added %zu memory objects to the side channel, total size = %zu\n", index, used);
+}
+
+/*
+ * Delete entire pending async list.
+ */
+static void delete_pending_async_list_locked(struct list_head *l_pending_mem_obj)
+{
+	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
+	struct smcinvoke_mem_obj_pending_async *temp = NULL;
+
+	if (list_empty(l_pending_mem_obj))
+		return;
+
+	list_for_each_entry_safe(mem_obj_pending, temp, l_pending_mem_obj, list) {
+		mem_obj_pending->mem_obj = NULL;
+		list_del(&mem_obj_pending->list);
+		kfree(mem_obj_pending);
+	}
+}
+
+
+/*
+ * Unmap/release the mapped objects from the pending async list.
+ */
+static void release_map_obj_pending_async_list_locked(struct list_head *l_pending_mem_obj)
+{
+	struct smcinvoke_mem_obj_pending_async *mem_obj_pending = NULL;
+	struct smcinvoke_mem_obj_pending_async *temp = NULL;
+
+	if (list_empty(l_pending_mem_obj))
+		return;
+
+	list_for_each_entry_safe(mem_obj_pending, temp, l_pending_mem_obj, list) {
+		kref_put(&mem_obj_pending->mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked);
+	}
+}
+
+static long process_ack_local_obj(struct file *filp, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = -1;
+	int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL;
+	struct smcinvoke_file_data *filp_data = filp->private_data;
+
+	if (_IOC_SIZE(cmd) != sizeof(int32_t))
+		return -EINVAL;
+
+	ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg,
+			sizeof(int32_t));
+	if (ret)
+		return -EFAULT;
+
+	mutex_lock(&g_smcinvoke_lock);
+	if (UHANDLE_IS_CB_OBJ(local_obj))
+		ret = put_pending_cbobj_locked(filp_data->server_id,
+				UHANDLE_GET_CB_OBJ(local_obj));
+	mutex_unlock(&g_smcinvoke_lock);
+
+	return ret;
+}
+
+static long process_server_req(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int ret = -1;
+	int32_t server_fd = -1;
+	struct smcinvoke_server server_req = {0};
+	struct smcinvoke_server_info *server_info = NULL;
+
+	if (_IOC_SIZE(cmd) != sizeof(server_req)) {
+		pr_err("invalid command size received for server request\n");
+		return -EINVAL;
+	}
+	ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg,
+					sizeof(server_req));
+	if (ret) {
+		pr_err("copying server request from user failed\n");
+		return -EFAULT;
+	}
+	server_info = kzalloc(sizeof(*server_info), GFP_KERNEL);
+	if (!server_info)
+		return -ENOMEM;
+
+	kref_init(&server_info->ref_cnt);
+	init_waitqueue_head(&server_info->req_wait_q);
+	init_waitqueue_head(&server_info->rsp_wait_q);
+	server_info->cb_buf_size = server_req.cb_buf_size;
+	hash_init(server_info->reqs_table);
+	hash_init(server_info->responses_table);
+	INIT_LIST_HEAD(&server_info->pending_cbobjs);
+	server_info->is_server_suspended = 0;
+
+	mutex_lock(&g_smcinvoke_lock);
+
+	server_info->server_id = next_cb_server_id_locked();
+	hash_add(g_cb_servers, &server_info->hash,
+			server_info->server_id);
+	if (g_max_cb_buf_size < server_req.cb_buf_size)
+		g_max_cb_buf_size = server_req.cb_buf_size;
+
+	mutex_unlock(&g_smcinvoke_lock);
+	ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER,
+			server_info->server_id, &server_fd);
+
+	if (ret)
+		release_cb_server(server_info->server_id);
+
+	return server_fd;
+}
+
+static long process_accept_req(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int ret = -1;
+	struct smcinvoke_file_data *server_obj = filp->private_data;
+	struct smcinvoke_accept user_args = {0};
+	struct smcinvoke_cb_txn *cb_txn = NULL;
+	struct smcinvoke_server_info *server_info = NULL;
+
+	if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) {
+		pr_err("command size invalid for accept request\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&user_args, (void __user *)arg,
+			sizeof(struct smcinvoke_accept))) {
+		pr_err("copying accept request from user failed\n");
+		return -EFAULT;
+	}
+
+	if (user_args.argsize != sizeof(union smcinvoke_arg)) {
+		pr_err("arguments size is invalid for accept thread\n");
+		return -EINVAL;
+	}
+
+	/* ACCEPT is available only on server obj */
+	if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) {
+		pr_err("invalid object type received for accept req\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&g_smcinvoke_lock);
+	server_info = get_cb_server_locked(server_obj->server_id);
+
+	if (!server_info) {
+		pr_err("No matching server with server id : %u found\n",
+				server_obj->server_id);
+		mutex_unlock(&g_smcinvoke_lock);
+		return -EINVAL;
+	}
+
+	if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT)
+		server_info->state = 0;
+
+	server_info->is_server_suspended = UNSET_BIT(server_info->is_server_suspended,
+				(current->pid)%DEFAULT_CB_OBJ_THREAD_CNT);
+
+	mutex_unlock(&g_smcinvoke_lock);
+
+	/* First check if it has response otherwise wait for req */
+	if (user_args.has_resp) {
+		trace_process_accept_req_has_response(current->pid, current->tgid);
+
+		mutex_lock(&g_smcinvoke_lock);
+		cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
+				SMCINVOKE_REQ_PROCESSING);
+		mutex_unlock(&g_smcinvoke_lock);
+		/*
+		 * cb_txn can be NULL if userspace provided a wrong txn id OR
+		 * the invoke thread died while the server was processing the cb
+		 * req. If the invoke thread dies, it removes the req from the
+		 * queue, so no matching cb_txn would be on the queue, hence a
+		 * NULL cb_txn. In this case, we want this thread to start
+		 * waiting for new cb requests.
+		 */
+		if (!cb_txn) {
+			pr_err_ratelimited("%s txn %llu either invalid or removed from Q\n",
+					__func__, user_args.txn_id);
+			goto start_waiting_for_requests;
+		}
+		ret = marshal_out_tzcb_req(&user_args, cb_txn,
+				cb_txn->filp_to_release);
+		/*
+		 * if client did not set error and we get error locally,
+		 * we return local error to TA
+		 */
+		if (ret && cb_txn->cb_req->result == 0)
+			cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
+
+		cb_txn->state = SMCINVOKE_REQ_PROCESSED;
+		mutex_lock(&g_smcinvoke_lock);
+
+		kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
+		mutex_unlock(&g_smcinvoke_lock);
+		wake_up(&server_info->rsp_wait_q);
+		/*
+		 * if marshal_out fails, we should let userspace release
+		 * any ref/obj it created for CB processing
+		 */
+		if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts))
+			goto out;
+	}
+start_waiting_for_requests:
+	/*
+	 * Once response has been delivered, thread will wait for another
+	 * callback req to process.
+	 */
+	do {
+		ret = wait_event_interruptible(server_info->req_wait_q,
+				!hash_empty(server_info->reqs_table));
+		if (ret) {
+			trace_process_accept_req_ret(current->pid, current->tgid, ret);
+			/*
+			 * Ideally, we should destroy server if accept threads
+			 * are returning due to client being killed or device
+			 * going down (Shutdown/Reboot) but that would make
+			 * server_info invalid. Other accept/invoke threads are
+			 * using server_info and would crash. So don't do that.
+			 */
+			mutex_lock(&g_smcinvoke_lock);
+
+			if (freezing(current)) {
+				pr_err_ratelimited("Server id:%d interrupted, probably due to suspend, pid:%d\n",
+					server_info->server_id, current->pid);
+				/*
+				 * Each accept thread is identified by bits ranging from
+				 * 0 to DEFAULT_CB_OBJ_THREAD_CNT-1. When an accept thread is
+				 * interrupted by a signal other than SIGUSR1, SIGKILL or
+				 * SIGTERM, set the corresponding bit for that accept thread,
+				 * marking its state as "suspended", i.e. requiring an
+				 * infinite timeout from the invoke thread.
+				 */
+				server_info->is_server_suspended =
+						SET_BIT(server_info->is_server_suspended,
+							(current->pid) % DEFAULT_CB_OBJ_THREAD_CNT);
+			} else {
+				pr_err_ratelimited("Setting pid:%d, server id:%d state to defunct\n",
+						current->pid, server_info->server_id);
+				server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT;
+			}
+			mutex_unlock(&g_smcinvoke_lock);
+			wake_up_interruptible(&server_info->rsp_wait_q);
+			goto out;
+		}
+		mutex_lock(&g_smcinvoke_lock);
+		cb_txn = find_cbtxn_locked(server_info,
+				SMCINVOKE_NEXT_AVAILABLE_TXN,
+				SMCINVOKE_REQ_PLACED);
+		mutex_unlock(&g_smcinvoke_lock);
+		if (cb_txn) {
+			cb_txn->state = SMCINVOKE_REQ_PROCESSING;
+			ret = marshal_in_tzcb_req(cb_txn, &user_args,
+					server_obj->server_id);
+			if (ret) {
+				pr_err("failed to marshal in the callback request\n");
+				cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
+				cb_txn->state = SMCINVOKE_REQ_PROCESSED;
+				mutex_lock(&g_smcinvoke_lock);
+				kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
+				mutex_unlock(&g_smcinvoke_lock);
+				wake_up_interruptible(&server_info->rsp_wait_q);
+				continue;
+			}
+			mutex_lock(&g_smcinvoke_lock);
+			hash_add(server_info->responses_table, &cb_txn->hash,
+					cb_txn->txn_id);
+			kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked);
+			mutex_unlock(&g_smcinvoke_lock);
+
+			trace_process_accept_req_placed(current->pid, current->tgid);
+
+			ret = copy_to_user((void __user *)arg, &user_args,
+					sizeof(struct smcinvoke_accept));
+		}
+	} while (!cb_txn);
+out:
+	if (server_info) {
+		mutex_lock(&g_smcinvoke_lock);
+		kref_put(&server_info->ref_cnt, destroy_cb_server);
+		mutex_unlock(&g_smcinvoke_lock);
+	}
+
+	if (ret && ret != -ERESTARTSYS)
+		pr_err("accept thread returning with ret: %d\n", ret);
+
+	return ret;
+}
+
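+/*
+ * Handle an invoke request (from the ioctl path or a kernel client): copy in
+ * the request and its args, marshal them into shmbridge-backed in/out
+ * buffers, send them to TZ via prepare_send_scm_msg(), then marshal the
+ * results back and process any piggybacked object releases.
+ */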
+static long process_invoke_req(struct file *filp, unsigned int cmd,
+		unsigned long arg)
+{
+	int    ret = -1, nr_args = 0;
+	struct smcinvoke_cmd_req req = {0};
+	void   *in_msg = NULL, *out_msg = NULL;
+	size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE;
+	union  smcinvoke_arg *args_buf = NULL;
+	struct smcinvoke_file_data *tzobj = filp->private_data;
+	struct qtee_shm in_shm = {0}, out_shm = {0};
+	LIST_HEAD(l_mem_objs_pending_async);    /* Holds new memory objects, to be later sent to TZ */
+
+	/*
+	 * Hold a reference to the remote object until the invoke op is
+	 * completed. Release it once the invoke is done.
+	 */
+	struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL};
+	/*
+	 * If anything goes wrong, release the allotted tzhandles for
+	 * local objs, which could be either CBObjs or MemObjs.
+	 */
+	int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
+	bool tz_acked = false;
+	uint32_t context_type = tzobj->context_type;
+
+	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
+			_IOC_SIZE(cmd) != sizeof(req)) {
+		pr_err("command size for invoke req is invalid\n");
+		return -EINVAL;
+	}
+
+	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
+			context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
+		pr_err("invalid context_type %d\n", context_type);
+		return -EPERM;
+	}
+	if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) {
+		ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
+		if (ret) {
+			pr_err("copying invoke req failed\n");
+			return -EFAULT;
+		}
+	} else {
+		req = *(struct smcinvoke_cmd_req *)arg;
+	}
+	if (req.argsize != sizeof(union smcinvoke_arg)) {
+		pr_err("arguments size for invoke req is invalid\n");
+		return -EINVAL;
+	}
+
+	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ &&
+			tzobj->tzhandle == SMCINVOKE_TZ_ROOT_OBJ &&
+			(req.op == IClientEnv_OP_notifyDomainChange ||
+			req.op == IClientEnv_OP_registerWithCredentials ||
+			req.op == IClientEnv_OP_adciAccept ||
+			req.op == IClientEnv_OP_adciShutdown)) {
+		pr_err("invalid rootenv op\n");
+		return -EINVAL;
+	}
+
+	nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
+			OBJECT_COUNTS_NUM_objects(req.counts);
+
+	if (nr_args) {
+		args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
+		if (!args_buf)
+			return -ENOMEM;
+		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
+			ret = copy_from_user(args_buf,
+					u64_to_user_ptr(req.args),
+					nr_args * req.argsize);
+			if (ret) {
+				ret = -EFAULT;
+				goto out;
+			}
+		} else {
+			memcpy(args_buf, (void *)(req.args),
+					nr_args * req.argsize);
+		}
+	}
+
+	inmsg_size = compute_in_msg_size(&req, args_buf);
+	ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm);
+	if (ret) {
+		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for in msg in invoke req\n");
+		goto out;
+	}
+	in_msg = in_shm.vaddr;
+
+	mutex_lock(&g_smcinvoke_lock);
+	outmsg_size = PAGE_ALIGN(g_max_cb_buf_size);
+	mutex_unlock(&g_smcinvoke_lock);
+	ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm);
+	if (ret) {
+		ret = -ENOMEM;
+		pr_err("shmbridge alloc failed for out msg in invoke req\n");
+		goto out;
+	}
+	out_msg = out_shm.vaddr;
+
+	trace_process_invoke_req_tzhandle(tzobj->tzhandle, req.op, req.counts);
+
+	ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg,
+			inmsg_size, filp_to_release, tzhandles_to_release,
+			context_type, &l_mem_objs_pending_async);
+	if (ret) {
+		pr_err("failed to marshal in invoke req, ret :%d\n", ret);
+		goto out;
+	}
+
+	if (mem_obj_async_support) {
+		mutex_lock(&g_smcinvoke_lock);
+		add_mem_obj_info_to_async_side_channel_locked(out_msg, outmsg_size, &l_mem_objs_pending_async);
+		mutex_unlock(&g_smcinvoke_lock);
+	}
+
+	ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size,
+			out_msg, out_shm.paddr, outmsg_size,
+			&req, args_buf, &tz_acked, context_type,
+			&in_shm, &out_shm, true);
+
+	/*
+	 * If the scm call succeeds, TZ owns the responsibility to release
+	 * the refs for local objs.
+	 */
+	if (!tz_acked) {
+		trace_status(__func__, "scm call not acked by TZ");
+		goto out;
+	}
+	memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release));
+
+	/*
+	 * if invoke op results in an err, no need to marshal_out and
+	 * copy args buf to user space
+	 */
+	if (!req.result) {
+		/*
+		 * Don't check the ret of marshal_out because there might be an
+		 * FD for an OO which userspace must release even if an error
+		 * occurs. Releasing the FD from user space is much simpler than
+		 * doing it here. ORing of ret is required so as not to miss a
+		 * past error.
+		 */
+		if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ)
+			ret |= copy_to_user(u64_to_user_ptr(req.args),
+					args_buf, nr_args * req.argsize);
+		else
+			memcpy((void *)(req.args), args_buf,
+					nr_args * req.argsize);
+
+	}
+	/* copy result of invoke op */
+	if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
+		ret |= copy_to_user((void __user *)arg, &req, sizeof(req));
+		if (ret)
+			goto out;
+	} else {
+		memcpy((void *)arg, (void *)&req, sizeof(req));
+	}
+
+	/* Outbuf could be carrying local objs to be released. */
+	process_piggyback_data(out_msg, outmsg_size);
+out:
+	trace_process_invoke_req_result(ret, req.result, tzobj->tzhandle,
+			req.op, req.counts);
+
+	release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO);
+	if (ret) {
+		release_map_obj_pending_async_list_locked(&l_mem_objs_pending_async);
+		release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO);
+	}
+	delete_pending_async_list_locked(&l_mem_objs_pending_async);
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
+	kfree(args_buf);
+
+	if (ret)
+		pr_err("invoke thread returning with ret = %d\n", ret);
+
+	return ret;
+}
+
+static long process_log_info(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	int ret = 0;
+	char buf[SMCINVOKE_LOG_BUF_SIZE];
+	struct smcinvoke_file_data *tzobj = filp->private_data;
+
+	ret = copy_from_user(buf, (void __user *)arg, SMCINVOKE_LOG_BUF_SIZE);
+	if (ret) {
+		pr_err("logging HLOS info copy failed\n");
+		return -EFAULT;
+	}
+	buf[SMCINVOKE_LOG_BUF_SIZE - 1] = '\0';
+
+	trace_process_log_info(buf, tzobj->context_type, tzobj->tzhandle);
+
+	return ret;
+}
+
+static long smcinvoke_ioctl(struct file *filp, unsigned int cmd,
+						unsigned long arg)
+{
+	long ret = 0;
+
+	switch (cmd) {
+	case SMCINVOKE_IOCTL_INVOKE_REQ:
+		ret = process_invoke_req(filp, cmd, arg);
+		break;
+	case SMCINVOKE_IOCTL_ACCEPT_REQ:
+		ret = process_accept_req(filp, cmd, arg);
+		break;
+	case SMCINVOKE_IOCTL_SERVER_REQ:
+		ret = process_server_req(filp, cmd, arg);
+		break;
+	case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ:
+		ret = process_ack_local_obj(filp, cmd, arg);
+		break;
+	case SMCINVOKE_IOCTL_LOG:
+		ret = process_log_info(filp, cmd, arg);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	trace_smcinvoke_ioctl(cmd, ret);
+	return ret;
+}
+
+int get_root_fd(int *root_fd)
+{
+	if (!root_fd)
+		return -EINVAL;
+	else
+		return get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL,
+				SMCINVOKE_TZ_ROOT_OBJ, root_fd);
+}
+
+int process_invoke_request_from_kernel_client(int fd,
+			struct smcinvoke_cmd_req *req)
+{
+	struct file *filp = NULL;
+	int ret = 0;
+
+	if (!req) {
+		pr_err("NULL req\n");
+		return -EINVAL;
+	}
+
+	filp = fget(fd);
+	if (!filp) {
+		pr_err("Invalid fd %d\n", fd);
+		return -EINVAL;
+	}
+	ret = process_invoke_req(filp, 0, (uintptr_t)req);
+	fput(filp);
+	trace_process_invoke_request_from_kernel_client(fd, filp, file_count(filp));
+	return ret;
+}
+
+static int smcinvoke_open(struct inode *nodp, struct file *filp)
+{
+	struct smcinvoke_file_data *tzcxt = NULL;
+
+	tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL);
+	if (!tzcxt)
+		return -ENOMEM;
+
+	tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ;
+	tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ;
+	filp->private_data = tzcxt;
+
+	return 0;
+}
+
+static int release_cb_server(uint16_t server_id)
+{
+	struct smcinvoke_server_info *server = NULL;
+
+	mutex_lock(&g_smcinvoke_lock);
+	server = find_cb_server_locked(server_id);
+	if (server)
+		kref_put(&server->ref_cnt, destroy_cb_server);
+	mutex_unlock(&g_smcinvoke_lock);
+	return 0;
+}
+
+int smcinvoke_release_filp(struct file *filp)
+{
+	int ret = 0;
+	struct smcinvoke_file_data *file_data = filp->private_data;
+	uint32_t tzhandle = 0;
+	struct smcinvoke_object_release_pending_list *entry = NULL;
+	struct qtee_shm in_shm = {0}, out_shm = {0};
+
+	trace_smcinvoke_release_filp(current->files, filp,
+			file_count(filp), file_data->context_type);
+
+	if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) {
+		ret = release_cb_server(file_data->server_id);
+		goto out;
+	}
+
+	tzhandle = file_data->tzhandle;
+	/* The root object is special in the sense that it is indestructible */
+	if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) {
+		if (!tzhandle)
+			pr_err("tzhandle not valid in object release\n");
+		goto out;
+	}
+
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm);
+	if (ret) {
+		pr_err("shmbridge alloc failed for in msg in object release with ret %d\n",
+				ret);
+		goto out;
+	}
+
+	ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm);
+	if (ret) {
+		pr_err("shmbridge alloc failed for out msg in object release with ret %d\n",
+				ret);
+		goto out;
+	}
+
+	ret = smcinvoke_release_tz_object(&in_shm, &out_shm,
+		tzhandle, file_data->context_type);
+
+	if (-EBUSY == ret) {
+		pr_debug("failed to release handle in sync adding to list\n");
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = 0;
+		entry->data.tzhandle = tzhandle;
+		entry->data.context_type = file_data->context_type;
+		mutex_lock(&object_postprocess_lock);
+		list_add_tail(&entry->list, &g_object_postprocess);
+		mutex_unlock(&object_postprocess_lock);
+		pr_debug("Object release list: added a handle:%u\n", tzhandle);
+		__wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]);
+	}
+
+out:
+	qtee_shmbridge_free_shm(&in_shm);
+	qtee_shmbridge_free_shm(&out_shm);
+	kfree(filp->private_data);
+	filp->private_data = NULL;
+
+	if (ret != 0)
+		pr_err("Object release failed with ret %d\n", ret);
+	return ret;
+}
+
+int smcinvoke_release_from_kernel_client(int fd)
+{
+	struct file *filp = NULL;
+
+	/*
+	 * Use fget() to get filp; this increases the file ref_cnt by one,
+	 * which is dropped again with fput() below.
+	 */
+	filp = fget(fd);
+	if (!filp) {
+		pr_err("invalid fd %d to release\n", fd);
+		return -EINVAL;
+	}
+	trace_smcinvoke_release_from_kernel_client(current->files, filp,
+			file_count(filp));
+	/* free filp, notify TZ to release object */
+	smcinvoke_release_filp(filp);
+	fput(filp);
+	return 0;
+}
+
+static int smcinvoke_release(struct inode *nodp, struct file *filp)
+{
+	trace_smcinvoke_release(current->files, filp, file_count(filp),
+			filp->private_data);
+
+	if (filp->private_data)
+		return smcinvoke_release_filp(filp);
+	else
+		return 0;
+}
+
+static int smcinvoke_probe(struct platform_device *pdev)
+{
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	int rc = 0;
+
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		pr_err("dma_set_mask_and_coherent failed %d\n", rc);
+		return rc;
+	}
+	legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-legacy_smc");
+	invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD;
+
+	rc = smcinvoke_create_kthreads();
+	if (rc) {
+		pr_err("smcinvoke_create_kthreads failed %d\n", rc);
+		return rc;
+	}
+
+	rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count,
+							SMCINVOKE_DEV);
+	if (rc < 0) {
+		pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV);
+		goto exit_destroy_wkthread;
+	}
+#if  (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	driver_class = class_create(SMCINVOKE_DEV);
+#else
+	driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV);
+#endif
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+	class_dev = device_create(driver_class, NULL, smcinvoke_device_no,
+						NULL, SMCINVOKE_DEV);
+	if (!class_dev) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops);
+	smcinvoke_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0),
+								count);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV);
+		goto exit_destroy_device;
+	}
+	smcinvoke_pdev = pdev;
+
+#if IS_ENABLED(CONFIG_QSEECOM_COMPAT) && IS_ENABLED(CONFIG_QSEECOM_PROXY)
+	/*
+	 * If the API fails to get the func ops, print the error and continue;
+	 * do not treat it as fatal.
+	 */
+	rc = get_qseecom_kernel_fun_ops();
+	if (rc)
+		pr_err("failed to get qseecom kernel func ops %d\n", rc);
+#endif
+	__wakeup_postprocess_kthread(&smcinvoke[ADCI_WORKER_THREAD]);
+	return 0;
+
+exit_destroy_device:
+	device_destroy(driver_class, smcinvoke_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(smcinvoke_device_no, count);
+exit_destroy_wkthread:
+	smcinvoke_destroy_kthreads();
+	return rc;
+}
+
+static int smcinvoke_remove(struct platform_device *pdev)
+{
+	int count = 1;
+
+	smcinvoke_destroy_kthreads();
+	cdev_del(&smcinvoke_cdev);
+	device_destroy(driver_class, smcinvoke_device_no);
+	class_destroy(driver_class);
+	unregister_chrdev_region(smcinvoke_device_no, count);
+	return 0;
+}
+
+static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev,
+					pm_message_t state)
+{
+	int ret = 0;
+
+	mutex_lock(&g_smcinvoke_lock);
+	if (cb_reqs_inflight) {
+		pr_err("Failed to suspend smcinvoke driver\n");
+		ret = -EIO;
+	}
+	mutex_unlock(&g_smcinvoke_lock);
+	return ret;
+}
+
+static int __maybe_unused smcinvoke_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id smcinvoke_match[] = {
+	{
+		.compatible = "qcom,smcinvoke",
+	},
+	{},
+};
+
+static struct platform_driver smcinvoke_plat_driver = {
+	.probe = smcinvoke_probe,
+	.remove = smcinvoke_remove,
+	.suspend = smcinvoke_suspend,
+	.resume = smcinvoke_resume,
+	.driver = {
+		.name = "smcinvoke",
+		.of_match_table = smcinvoke_match,
+	},
+};
+
+static int smcinvoke_init(void)
+{
+	return platform_driver_register(&smcinvoke_plat_driver);
+}
+
+static void smcinvoke_exit(void)
+{
+	platform_driver_unregister(&smcinvoke_plat_driver);
+}
+
+module_init(smcinvoke_init);
+module_exit(smcinvoke_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SMC Invoke driver");
+MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
+MODULE_IMPORT_NS(DMA_BUF);

+ 639 - 0
qcom/opensource/securemsm-kernel/smcinvoke/smcinvoke_kernel.c

@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/anon_inodes.h>
+#include <linux/delay.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/elf.h>
+#include "smcinvoke.h"
+#include "smcinvoke_object.h"
+#include "IClientEnv.h"
+#if IS_ENABLED(CONFIG_QSEECOM_COMPAT)
+#include "IQSEEComCompat.h"
+#include "IQSEEComCompatAppLoader.h"
+#include "linux/qseecom_api.h"
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+#include <linux/qseecom_kernel.h>
+#else
+#include "misc/qseecom_kernel.h"
+#endif
+#endif
+
+#define MAX_FW_APP_SIZE	256		//Application name size.
+#define FILE_EXT_SIZE	5		//File extension like .mbn etc
+
+const uint32_t CQSEEComCompatAppLoader_UID = 122;
+extern struct device *class_dev;
+
+struct qseecom_compat_context {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+	struct qtee_shm shm;
+	uint8_t app_arch;
+	struct Object client_env;
+	struct Object app_loader;
+	struct Object app_controller;
+};
+
+struct tzobject_context {
+	int fd;
+	struct kref refs;
+};
+
+static int invoke_over_smcinvoke(void *cxt,
+			uint32_t op,
+			union ObjectArg *args,
+			uint32_t counts);
+
+static struct Object tzobject_new(int fd)
+{
+	struct tzobject_context *me =
+			kzalloc(sizeof(struct tzobject_context), GFP_KERNEL);
+	if (!me)
+		return Object_NULL;
+
+	kref_init(&me->refs);
+	me->fd = fd;
+	pr_debug("%s: me->fd = %d, me->refs = %u\n", __func__,
+			me->fd, kref_read(&me->refs));
+	return (struct Object) { invoke_over_smcinvoke, me };
+}
+
+static void tzobject_delete(struct kref *refs)
+{
+	struct tzobject_context *me = container_of(refs,
+				struct tzobject_context, refs);
+
+	pr_info("%s: me->fd = %d, me->refs = %d, files = %p\n",
+		__func__, me->fd, kref_read(&me->refs), current->files);
+	/*
+	 * After close_fd() the file ref_cnt drops to 0, but
+	 * smcinvoke_release() would still not have been called. So we first
+	 * call smcinvoke_release_from_kernel_client() to free the filp and
+	 * ask TZ to release the object, and then call close_fd().
+	 */
+	smcinvoke_release_from_kernel_client(me->fd);
+	close_fd(me->fd);
+	kfree(me);
+}
+
+int getObjectFromHandle(int handle, struct Object *obj)
+{
+	int ret = 0;
+
+	if (handle == SMCINVOKE_USERSPACE_OBJ_NULL) {
+		/* NULL object*/
+		Object_ASSIGN_NULL(*obj);
+	} else if (handle > SMCINVOKE_USERSPACE_OBJ_NULL) {
+		*obj = tzobject_new(handle);
+		if (Object_isNull(*obj))
+			ret = OBJECT_ERROR_BADOBJ;
+	} else {
+		pr_err("CBobj not supported for handle %d\n", handle);
+		ret = OBJECT_ERROR_BADOBJ;
+	}
+
+	return ret;
+}
+
+int getHandleFromObject(struct Object obj, int *handle)
+{
+	int ret = 0;
+
+	if (Object_isNull(obj)) {
+		/* set a NULL Object's fd to be -1 */
+		*handle = SMCINVOKE_USERSPACE_OBJ_NULL;
+		return ret;
+	}
+
+	if (obj.invoke == invoke_over_smcinvoke) {
+		struct tzobject_context *ctx = (struct tzobject_context *)(obj.context);
+
+		if (ctx != NULL) {
+			*handle = ctx->fd;
+		} else {
+			pr_err("Failed to get tzobject_context obj handle, ret = %d\n", ret);
+			ret = OBJECT_ERROR_BADOBJ;
+		}
+	} else {
+		pr_err("CBobj not supported\n");
+		ret = OBJECT_ERROR_BADOBJ;
+	}
+
+	return ret;
+}
+
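+/*
+ * Pack a kernel-side ObjectArg array into the smcinvoke_arg layout consumed
+ * by process_invoke_request_from_kernel_client(): buffers are passed by
+ * address and size, input objects are translated to fds via
+ * getHandleFromObject(), and output object slots are initialised to
+ * SMCINVOKE_USERSPACE_OBJ_NULL.
+ */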
+static int marshalIn(struct smcinvoke_cmd_req *req,
+			union smcinvoke_arg *argptr,
+			uint32_t op, union ObjectArg *args,
+			uint32_t counts)
+{
+	size_t i = 0;
+
+	req->op = op;
+	req->counts = counts;
+	req->argsize = sizeof(union smcinvoke_arg);
+	req->args = (uintptr_t)argptr;
+
+	FOR_ARGS(i, counts, buffers) {
+		argptr[i].b.addr = (uintptr_t) args[i].b.ptr;
+		argptr[i].b.size = args[i].b.size;
+	}
+
+	FOR_ARGS(i, counts, OI) {
+		int handle = -1, ret;
+
+		ret = getHandleFromObject(args[i].o, &handle);
+		if (ret) {
+			pr_err("invalid OI[%zu]\n", i);
+			return OBJECT_ERROR_BADOBJ;
+		}
+		argptr[i].o.fd = handle;
+	}
+
+	FOR_ARGS(i, counts, OO) {
+		argptr[i].o.fd = SMCINVOKE_USERSPACE_OBJ_NULL;
+	}
+	return OBJECT_OK;
+}
+
+static int marshalOut(struct smcinvoke_cmd_req *req,
+			union smcinvoke_arg *argptr,
+			union ObjectArg *args, uint32_t counts,
+			struct tzobject_context *me)
+{
+	int ret = req->result;
+	bool failed = false;
+	size_t i = 0;
+
+	argptr = (union smcinvoke_arg *)(uintptr_t)(req->args);
+
+	FOR_ARGS(i, counts, BO) {
+		args[i].b.size = argptr[i].b.size;
+	}
+
+	FOR_ARGS(i, counts, OO) {
+		ret = getObjectFromHandle(argptr[i].o.fd, &(args[i].o));
+		if (ret) {
+			pr_err("Failed to get OO[%zu] from handle = %d\n",
+				i, (int)argptr[i].o.fd);
+			failed = true;
+			break;
+		}
+		pr_debug("Succeed to create OO for args[%zu].o, fd = %d\n",
+			i, (int)argptr[i].o.fd);
+	}
+	if (failed) {
+		FOR_ARGS(i, counts, OO) {
+			Object_ASSIGN_NULL(args[i].o);
+		}
+		/* Only overwrite ret value if invoke result is 0 */
+		if (ret == 0)
+			ret = OBJECT_ERROR_BADOBJ;
+	}
+	return ret;
+}
+
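+/*
+ * Object invoke function for TZ-backed kernel objects: local retain/release
+ * ops only adjust the context's kref, while remote ops are marshalled into a
+ * smcinvoke_cmd_req and dispatched to TZ through
+ * process_invoke_request_from_kernel_client() on the context's fd.
+ */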
+static int invoke_over_smcinvoke(void *cxt,
+			uint32_t op,
+			union ObjectArg *args,
+			uint32_t counts)
+{
+	int ret = OBJECT_OK;
+	struct smcinvoke_cmd_req req = {0, 0, 0, 0, 0};
+	size_t i = 0;
+	struct tzobject_context *me = NULL;
+	uint32_t method;
+	union smcinvoke_arg *argptr = NULL;
+
+	FOR_ARGS(i, counts, OO) {
+		args[i].o = Object_NULL;
+	}
+
+	me = (struct tzobject_context *)cxt;
+	method = ObjectOp_methodID(op);
+	pr_debug("%s: cxt = %p, fd = %d, op = %u, cnt = %x, refs = %u\n",
+			__func__, me, me->fd, op, counts, kref_read(&me->refs));
+
+	if (ObjectOp_isLocal(op)) {
+		switch (method) {
+		case Object_OP_retain:
+			kref_get(&me->refs);
+			return OBJECT_OK;
+		case Object_OP_release:
+			kref_put(&me->refs, tzobject_delete);
+			return OBJECT_OK;
+		}
+		return OBJECT_ERROR_REMOTE;
+	}
+
+	argptr = kcalloc(OBJECT_COUNTS_TOTAL(counts),
+			sizeof(union smcinvoke_arg), GFP_KERNEL);
+	if (argptr == NULL)
+		return OBJECT_ERROR_KMEM;
+
+	ret = marshalIn(&req, argptr, op, args, counts);
+	if (ret)
+		goto exit;
+
+	ret = process_invoke_request_from_kernel_client(me->fd, &req);
+	if (ret) {
+		pr_err("INVOKE failed with ret = %d, result = %d\n"
+			"obj.context = %p, fd = %d, op = %d, counts = 0x%x\n",
+			ret, req.result, me, me->fd, op, counts);
+		FOR_ARGS(i, counts, OO) {
+			struct smcinvoke_obj obj = argptr[i].o;
+
+			if (obj.fd >= 0) {
+				pr_err("Close OO[%zu].fd = %lld\n", i, obj.fd);
+				close_fd(obj.fd);
+			}
+		}
+		if (ret == -EBUSY)
+			ret = OBJECT_ERROR_BUSY;
+		else if (ret == -ENOMEM)
+			ret = OBJECT_ERROR_KMEM;
+		else
+			ret = OBJECT_ERROR_UNAVAIL;
+		goto exit;
+	}
+
+	if (!req.result)
+		ret = marshalOut(&req, argptr, args, counts, me);
+exit:
+	kfree(argptr);
+	return ret | req.result;
+}
+
+int get_root_obj(struct Object *rootObj)
+{
+	int ret = 0;
+	int root_fd = -1;
+
+	ret = get_root_fd(&root_fd);
+	if (ret) {
+		pr_err("Failed to get root fd, ret = %d\n", ret);
+		return ret;
+	}
+	*rootObj = tzobject_new(root_fd);
+	if (Object_isNull(*rootObj)) {
+		close_fd(root_fd);
+		ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/*
+ * Get a client environment using a NULL credentials Object
+ */
+int32_t get_client_env_object(struct Object *clientEnvObj)
+{
+	int32_t  ret = OBJECT_ERROR;
+	int retry_count = 0;
+	struct Object rootObj = Object_NULL;
+
+	/* get rootObj */
+	ret = get_root_obj(&rootObj);
+	if (ret) {
+		pr_err("Failed to create rootobj\n");
+		return ret;
+	}
+
+	/* get client env */
+	do {
+		ret = IClientEnv_registerWithCredentials(rootObj,
+			Object_NULL, clientEnvObj);
+		if (ret == OBJECT_ERROR_BUSY) {
+			pr_err("Secure side is busy, will retry after 5 ms, retry_count = %d\n", retry_count);
+			msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
+		}
+	} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
+
+	if (ret)
+		pr_err("Failed to get ClientEnvObject, ret = %d\n", ret);
+	Object_release(rootObj);
+	return ret;
+}
+EXPORT_SYMBOL(get_client_env_object);
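+
+/*
+ * Typical usage from a kernel client (illustrative sketch only;
+ * SOME_SERVICE_UID stands in for the UID of the service being opened):
+ *
+ *	struct Object client_env = Object_NULL, service = Object_NULL;
+ *
+ *	if (!get_client_env_object(&client_env)) {
+ *		if (!IClientEnv_open(client_env, SOME_SERVICE_UID, &service)) {
+ *			// ... invoke operations on 'service' ...
+ *			Object_release(service);
+ *		}
+ *		Object_release(client_env);
+ *	}
+ */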
+
+#if IS_ENABLED(CONFIG_QSEECOM_COMPAT)
+
+static int load_app(struct qseecom_compat_context *cxt, const char *app_name)
+{
+	size_t fw_size = 0;
+	u8 *imgbuf_va = NULL;
+	int ret = 0;
+	char dist_name[MAX_FW_APP_SIZE] = {0};
+	size_t dist_name_len = 0;
+	struct qtee_shm shm = {0};
+
+	if (strnlen(app_name, MAX_FW_APP_SIZE) == MAX_FW_APP_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strnlen(app_name, MAX_FW_APP_SIZE));
+		return -EINVAL;
+	}
+
+	ret = IQSEEComCompatAppLoader_lookupTA(cxt->app_loader,
+		app_name, strlen(app_name), &cxt->app_controller);
+	if (!ret) {
+		pr_info("app %s exists\n", app_name);
+		return ret;
+	}
+
+	imgbuf_va = firmware_request_from_smcinvoke(app_name, &fw_size, &shm);
+	if (imgbuf_va == NULL) {
+		pr_err("Failed on firmware_request_from_smcinvoke\n");
+		return -EINVAL;
+	}
+
+	ret = IQSEEComCompatAppLoader_loadFromBuffer(
+			cxt->app_loader, imgbuf_va, fw_size,
+			app_name, strlen(app_name),
+			dist_name, MAX_FW_APP_SIZE, &dist_name_len,
+			&cxt->app_controller);
+	if (ret) {
+		pr_err("loadFromBuffer failed for app %s, ret = %d\n",
+				app_name, ret);
+		goto exit_release_shm;
+	}
+	cxt->app_arch = *(uint8_t *)(imgbuf_va + EI_CLASS);
+
+	pr_info("%s %d, loaded app %s, dist_name %s, dist_name_len %zu\n",
+		__func__, __LINE__, app_name, dist_name, dist_name_len);
+
+exit_release_shm:
+	qtee_shmbridge_free_shm(&shm);
+	return ret;
+}
+
+static int __qseecom_start_app(struct qseecom_handle **handle,
+					char *app_name, uint32_t size)
+{
+	int ret = 0;
+	struct qseecom_compat_context *cxt = NULL;
+
+	pr_warn("%s, start app %s, size %u\n",
+		__func__, app_name, size);
+	if (app_name == NULL || handle == NULL) {
+		pr_err("app_name is null or invalid handle\n");
+		return -EINVAL;
+	}
+	/* allocate qseecom_compat_context */
+	cxt = kzalloc(sizeof(struct qseecom_compat_context), GFP_KERNEL);
+	if (!cxt)
+		return -ENOMEM;
+
+	/* get client env */
+	ret = get_client_env_object(&cxt->client_env);
+	if (ret) {
+		pr_err("failed to get clientEnv when loading app %s, ret %d\n",
+			app_name, ret);
+		ret = -EINVAL;
+		goto exit_free_cxt;
+	}
+	/* get apploader with CQSEEComCompatAppLoader_UID */
+	ret = IClientEnv_open(cxt->client_env, CQSEEComCompatAppLoader_UID,
+				&cxt->app_loader);
+	if (ret) {
+		pr_err("failed to get apploader when loading app %s, ret %d\n",
+			app_name, ret);
+		ret = -EINVAL;
+		goto exit_release_clientenv;
+	}
+
+	/* load app */
+	ret = load_app(cxt, app_name);
+	if (ret) {
+		pr_err("failed to load app %s, ret = %d\n",
+			app_name, ret);
+		ret = -EINVAL;
+		goto exit_release_apploader;
+	}
+
+	/* Get the physical address of the req/resp buffer */
+	ret = qtee_shmbridge_allocate_shm(size, &cxt->shm);
+
+	if (ret) {
+		pr_err("qtee_shmbridge_allocate_shm failed, ret :%d\n", ret);
+		ret = -EINVAL;
+		goto exit_release_appcontroller;
+	}
+	cxt->sbuf = cxt->shm.vaddr;
+	cxt->sbuf_len = size;
+	*handle = (struct qseecom_handle *)cxt;
+
+	return ret;
+
+exit_release_appcontroller:
+	Object_release(cxt->app_controller);
+exit_release_apploader:
+	Object_release(cxt->app_loader);
+exit_release_clientenv:
+	Object_release(cxt->client_env);
+exit_free_cxt:
+	kfree(cxt);
+
+	return ret;
+}
+
+static int __qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	struct qseecom_compat_context *cxt = NULL;
+
+	if ((handle == NULL) || (*handle == NULL)) {
+		pr_err("Handle is NULL\n");
+		return -EINVAL;
+	}
+
+	cxt = (struct qseecom_compat_context *)(*handle);
+
+	qtee_shmbridge_free_shm(&cxt->shm);
+	Object_release(cxt->app_controller);
+	Object_release(cxt->app_loader);
+	Object_release(cxt->client_env);
+	kfree(cxt);
+	*handle = NULL;
+	return 0;
+}
+
+static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	struct qseecom_compat_context *cxt =
+			(struct qseecom_compat_context *)handle;
+	size_t out_len = 0;
+
+	pr_debug("%s, sbuf_len %u, rbuf_len %u\n",
+		__func__, sbuf_len, rbuf_len);
+
+	if (!handle || !send_buf || !resp_buf || !sbuf_len || !rbuf_len) {
+		pr_err("%s: One of the params is invalid: handle %p, send_buf %p, resp_buf %p, sbuf_len %u, rbuf_len %u\n",
+			 __func__, handle, send_buf, resp_buf, sbuf_len, rbuf_len);
+		return -EINVAL;
+	}
+	return IQSEEComCompat_sendRequest(cxt->app_controller,
+				  send_buf, sbuf_len,
+				  resp_buf, rbuf_len,
+				  send_buf, sbuf_len, &out_len,
+				  resp_buf, rbuf_len, &out_len,
+				  NULL, 0, /* embedded offset array */
+				  (cxt->app_arch == ELFCLASS64),
+				  Object_NULL, Object_NULL,
+				  Object_NULL, Object_NULL);
+}
+
+#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
+static const struct qseecom_drv_ops qseecom_driver_ops = {
+	.qseecom_send_command = __qseecom_send_command,
+	.qseecom_start_app = __qseecom_start_app,
+	.qseecom_shutdown_app = __qseecom_shutdown_app,
+};
+
+int get_qseecom_kernel_fun_ops(void)
+{
+	return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
+}
+#else
+
+int qseecom_start_app(struct qseecom_handle **handle,
+			char *app_name, uint32_t size)
+{
+	return __qseecom_start_app(handle, app_name, size);
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	return __qseecom_shutdown_app(handle);
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	return __qseecom_send_command(handle, send_buf, sbuf_len,
+					resp_buf, rbuf_len);
+}
+EXPORT_SYMBOL(qseecom_send_command);
+#endif
+
+#endif
+
+char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm)
+{
+	int rc = 0;
+	const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entrylast = NULL;
+	char fw_name[MAX_FW_APP_SIZE + FILE_EXT_SIZE] = "\0";
+	int num_images = 0, phi = 0;
+	unsigned char app_arch = 0;
+	u8 *img_data_ptr = NULL;
+	size_t bufferOffset = 0, phdr_table_offset = 0;
+	size_t *offset = NULL;
+	Elf32_Phdr phdr32;
+	Elf64_Phdr phdr64;
+	struct elf32_hdr *ehdr = NULL;
+	struct elf64_hdr *ehdr64 = NULL;
+
+	/* load b00 */
+	snprintf(fw_name, sizeof(fw_name), "%s.b00", appname);
+	rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev);
+	if (rc) {
+		pr_err("Load %s failed, ret:%d\n", fw_name, rc);
+		return NULL;
+	}
+
+	app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS);
+
+	/*Get the offsets for split images header*/
+	if (app_arch == ELFCLASS32) {
+
+		ehdr = (struct elf32_hdr *)fw_entry00->data;
+		num_images = ehdr->e_phnum;
+		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
+		if (offset == NULL)
+			goto release_fw_entry00;
+		phdr_table_offset = (size_t) ehdr->e_phoff;
+		for (phi = 1; phi < num_images; ++phi) {
+			bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr);
+			phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset);
+			offset[phi] = (size_t)phdr32.p_offset;
+		}
+
+	} else if (app_arch == ELFCLASS64) {
+
+		ehdr64 = (struct elf64_hdr *)fw_entry00->data;
+		num_images = ehdr64->e_phnum;
+		offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
+		if (offset == NULL)
+			goto release_fw_entry00;
+		phdr_table_offset = (size_t) ehdr64->e_phoff;
+		for (phi = 1; phi < num_images; ++phi) {
+			bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr);
+			phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset);
+			offset[phi] = (size_t)phdr64.p_offset;
+		}
+
+	} else {
+
+		pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch);
+		goto release_fw_entry00;
+	}
+
+	/*Find the size of last split bin image*/
+	snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images-1);
+	rc = firmware_request_nowarn(&fw_entrylast, fw_name, class_dev);
+	if (rc) {
+		pr_err("Failed to locate blob %s\n", fw_name);
+		goto release_fw_entry00;
+	}
+
+	/*Total size of image will be the offset of last image + the size of last split image*/
+	*fw_size = fw_entrylast->size + offset[num_images-1];
+
+	/*Allocate memory for the buffer that will hold the split image*/
+	rc = qtee_shmbridge_allocate_shm((*fw_size), shm);
+	if (rc) {
+		pr_err("smbridge alloc failed for size: %zu\n", *fw_size);
+		goto release_fw_entrylast;
+	}
+	img_data_ptr = shm->vaddr;
+	/*
+	 * Copy contents of split bins to the buffer
+	 */
+	memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size);
+	for (phi = 1; phi < num_images-1; phi++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi);
+		rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev);
+		if (rc) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			qtee_shmbridge_free_shm(shm);
+			img_data_ptr = NULL;
+			goto release_fw_entrylast;
+		}
+		memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size);
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	memcpy(img_data_ptr + offset[phi], fw_entrylast->data, fw_entrylast->size);
+
+release_fw_entrylast:
+	release_firmware(fw_entrylast);
+release_fw_entry00:
+	release_firmware(fw_entry00);
+	kfree(offset);
+	return img_data_ptr;
+}
+EXPORT_SYMBOL(firmware_request_from_smcinvoke);

+ 502 - 0
qcom/opensource/securemsm-kernel/smcinvoke/trace_smcinvoke.h

@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM smcinvoke
+
+#if !defined(_TRACE_SMCINVOKE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SMCINVOKE_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include "smcinvoke.h"
+
+TRACE_EVENT(put_pending_cbobj_locked,
+	TP_PROTO(uint16_t srvr_id, uint16_t obj_id),
+	TP_ARGS(srvr_id, obj_id),
+	TP_STRUCT__entry(
+		__field(uint16_t,	srvr_id)
+		__field(uint16_t,	obj_id)
+	),
+	TP_fast_assign(
+		__entry->srvr_id	= srvr_id;
+		__entry->obj_id		= obj_id;
+	),
+	TP_printk("srvr_id=0x%x obj_id=0x%x",
+			__entry->srvr_id, __entry->obj_id)
+);
+
+TRACE_EVENT(release_mem_obj_locked,
+	TP_PROTO(uint32_t tzhandle, size_t buf_len),
+	TP_ARGS(tzhandle, buf_len),
+	TP_STRUCT__entry(
+		__field(uint32_t,	tzhandle)
+		__field(size_t,		buf_len)
+	),
+	TP_fast_assign(
+		__entry->tzhandle	= tzhandle;
+		__entry->buf_len	= buf_len;
+	),
+	TP_printk("tzhandle=0x%08x, buf_len=%zu",
+			__entry->tzhandle, __entry->buf_len)
+);
+
+TRACE_EVENT(invoke_cmd_handler,
+	TP_PROTO(int cmd, uint64_t response_type, int32_t result, int ret),
+	TP_ARGS(cmd, response_type, result, ret),
+	TP_STRUCT__entry(
+		__field(int,		cmd)
+		__field(uint64_t,	response_type)
+		__field(int32_t,	result)
+		__field(int,		ret)
+	),
+	TP_fast_assign(
+		__entry->response_type	= response_type;
+		__entry->result		= result;
+		__entry->ret		= ret;
+		__entry->cmd		= cmd;
+	),
+	TP_printk("cmd=0x%x (%d), response_type=%llu, result=0x%x (%d), ret=%d",
+			__entry->cmd, __entry->cmd, __entry->response_type,
+			__entry->result, __entry->result, __entry->ret)
+);
+
+TRACE_EVENT(process_tzcb_req_handle,
+	TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
+	TP_ARGS(tzhandle, op, counts),
+	TP_STRUCT__entry(
+		__field(uint32_t,	tzhandle)
+		__field(uint32_t,	op)
+		__field(uint32_t,	counts)
+	),
+	TP_fast_assign(
+		__entry->tzhandle	= tzhandle;
+		__entry->op		= op;
+		__entry->counts		= counts;
+	),
+	TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
+			__entry->tzhandle, __entry->op, __entry->counts)
+);
+
+TRACE_EVENT(process_tzcb_req_wait,
+	TP_PROTO(uint32_t tzhandle, int cbobj_retries, uint32_t txn_id, pid_t pid, pid_t tgid,
+			uint16_t server_state, uint16_t server_id, unsigned int cb_reqs_inflight),
+	TP_ARGS(tzhandle, cbobj_retries, txn_id, pid, tgid, server_state, server_id,
+			cb_reqs_inflight),
+	TP_STRUCT__entry(
+		__field(uint32_t,	tzhandle)
+		__field(int,		cbobj_retries)
+		__field(uint32_t,	txn_id)
+		__field(pid_t,		pid)
+		__field(pid_t,		tgid)
+		__field(uint16_t,	server_state)
+		__field(uint16_t,	server_id)
+		__field(unsigned int,	cb_reqs_inflight)
+	),
+	TP_fast_assign(
+		__entry->tzhandle		= tzhandle;
+		__entry->cbobj_retries		= cbobj_retries;
+		__entry->txn_id			= txn_id;
+		__entry->pid			= pid;
+		__entry->tgid			= tgid;
+		__entry->server_state		= server_state;
+		__entry->server_id		= server_id;
+		__entry->cb_reqs_inflight	= cb_reqs_inflight;
+	),
+	TP_printk("tzhandle=0x%08x, retries=%d, txn_id=%d, pid %x,tid %x, srvr state=%d, server_id=0x%x, cb_reqs_inflight=%d",
+			__entry->tzhandle, __entry->cbobj_retries, __entry->txn_id,
+			__entry->pid, __entry->tgid, __entry->server_state,
+			__entry->server_id, __entry->cb_reqs_inflight)
+);
+
+TRACE_EVENT(process_tzcb_req_result,
+	TP_PROTO(int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts,
+			unsigned int cb_reqs_inflight),
+	TP_ARGS(result, tzhandle, op, counts, cb_reqs_inflight),
+	TP_STRUCT__entry(
+		__field(int32_t,	result)
+		__field(uint32_t,	tzhandle)
+		__field(uint32_t,	op)
+		__field(uint32_t,	counts)
+		__field(unsigned int,	cb_reqs_inflight)
+	),
+	TP_fast_assign(
+		__entry->result			= result;
+		__entry->tzhandle		= tzhandle;
+		__entry->op			= op;
+		__entry->counts			= counts;
+		__entry->cb_reqs_inflight	= cb_reqs_inflight;
+	),
+	TP_printk("result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x, cb_reqs_inflight=%d",
+			__entry->result, __entry->tzhandle, __entry->op, __entry->counts,
+			__entry->cb_reqs_inflight)
+);
+
+TRACE_EVENT(marshal_out_invoke_req,
+	TP_PROTO(int i, uint32_t tzhandle, uint16_t server, uint32_t fd),
+	TP_ARGS(i, tzhandle, server, fd),
+	TP_STRUCT__entry(
+		__field(int,		i)
+		__field(uint32_t,	tzhandle)
+		__field(uint16_t,	server)
+		__field(uint32_t,	fd)
+	),
+	TP_fast_assign(
+		__entry->i		= i;
+		__entry->tzhandle	= tzhandle;
+		__entry->server		= server;
+		__entry->fd		= fd;
+	),
+	TP_printk("OO[%d]: tzhandle=0x%x server=0x%x fd=0x%x",
+			__entry->i, __entry->tzhandle, __entry->server, __entry->fd)
+);
+
+TRACE_EVENT(prepare_send_scm_msg,
+	TP_PROTO(uint64_t response_type, int32_t result),
+	TP_ARGS(response_type, result),
+	TP_STRUCT__entry(
+		__field(uint64_t,	response_type)
+		__field(int32_t,	result)
+	),
+	TP_fast_assign(
+		__entry->response_type	= response_type;
+		__entry->result		= result;
+	),
+	TP_printk("response_type=%llu (%llu), result=0x%x (%d)",
+			__entry->response_type, __entry->response_type,
+			__entry->result, __entry->result)
+);
+
+TRACE_EVENT(marshal_in_invoke_req,
+	TP_PROTO(int i, int64_t fd, int32_t cb_server_fd, uint32_t tzhandle),
+	TP_ARGS(i, fd, cb_server_fd, tzhandle),
+	TP_STRUCT__entry(
+		__field(int,		i)
+		__field(int64_t,	fd)
+		__field(int32_t,	cb_server_fd)
+		__field(uint32_t,	tzhandle)
+	),
+	TP_fast_assign(
+		__entry->i		= i;
+		__entry->fd		= fd;
+		__entry->cb_server_fd	= cb_server_fd;
+		__entry->tzhandle	= tzhandle;
+	),
+	TP_printk("OI[%d]: fd=%lld cb_server_fd=0x%x tzhandle=0x%x",
+			__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
+);
+
+TRACE_EVENT(marshal_in_tzcb_req_handle,
+	TP_PROTO(uint32_t tzhandle, int srvr_id, int32_t cbobj_id, uint32_t op, uint32_t counts),
+	TP_ARGS(tzhandle, srvr_id, cbobj_id, op, counts),
+	TP_STRUCT__entry(
+		__field(uint32_t,	tzhandle)
+		__field(int,		srvr_id)
+		__field(int32_t,	cbobj_id)
+		__field(uint32_t,	op)
+		__field(uint32_t,	counts)
+	),
+	TP_fast_assign(
+		__entry->tzhandle	= tzhandle;
+		__entry->srvr_id	= srvr_id;
+		__entry->cbobj_id	= cbobj_id;
+		__entry->op		= op;
+		__entry->counts		= counts;
+	),
+	TP_printk("tzhandle=0x%x srvr_id=0x%x cbobj_id=0x%08x op=0x%02x counts=0x%04x",
+			__entry->tzhandle, __entry->srvr_id, __entry->cbobj_id,
+			__entry->op, __entry->counts)
+);
+
+TRACE_EVENT(marshal_in_tzcb_req_fd,
+	TP_PROTO(int i, uint32_t tzhandle, int srvr_id, int32_t fd),
+	TP_ARGS(i, tzhandle, srvr_id, fd),
+	TP_STRUCT__entry(
+		__field(int,		i)
+		__field(uint32_t,	tzhandle)
+		__field(int,		srvr_id)
+		__field(int32_t,	fd)
+	),
+	TP_fast_assign(
+		__entry->i		= i;
+		__entry->tzhandle	= tzhandle;
+		__entry->srvr_id	= srvr_id;
+		__entry->fd		= fd;
+	),
+	TP_printk("OI[%d]: tzhandle=0x%x srvr_id=0x%x fd=0x%x",
+			__entry->i, __entry->tzhandle, __entry->srvr_id, __entry->fd)
+);
+
+TRACE_EVENT(marshal_out_tzcb_req,
+	TP_PROTO(uint32_t i, int32_t fd, int32_t cb_server_fd, uint32_t tzhandle),
+	TP_ARGS(i, fd, cb_server_fd, tzhandle),
+	TP_STRUCT__entry(
+		__field(int,		i)
+		__field(int32_t,	fd)
+		__field(int32_t,	cb_server_fd)
+		__field(uint32_t,	tzhandle)
+	),
+	TP_fast_assign(
+		__entry->i		= i;
+		__entry->fd		= fd;
+		__entry->cb_server_fd	= cb_server_fd;
+		__entry->tzhandle	= tzhandle;
+	),
+	TP_printk("OO[%d]: fd=0x%x cb_server_fd=0x%x tzhandle=0x%x",
+			__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
+);
+
+TRACE_EVENT(process_invoke_req_tzhandle,
+	TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
+	TP_ARGS(tzhandle, op, counts),
+	TP_STRUCT__entry(
+		__field(uint32_t, tzhandle)
+		__field(uint32_t, op)
+		__field(uint32_t, counts)
+	),
+	TP_fast_assign(
+		__entry->tzhandle	= tzhandle;
+		__entry->op		= op;
+		__entry->counts		= counts;
+	),
+	TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
+			__entry->tzhandle, __entry->op, __entry->counts)
+);
+
+TRACE_EVENT(process_invoke_req_result,
+	TP_PROTO(int ret, int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts),
+	TP_ARGS(ret, result, tzhandle, op, counts),
+	TP_STRUCT__entry(
+		__field(int,		ret)
+		__field(int32_t,	result)
+		__field(uint32_t,	tzhandle)
+		__field(uint32_t,	op)
+		__field(uint32_t,	counts)
+	),
+	TP_fast_assign(
+		__entry->ret		= ret;
+		__entry->result		= result;
+		__entry->tzhandle	= tzhandle;
+		__entry->op		= op;
+		__entry->counts		= counts;
+	),
+	TP_printk("ret=%d result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x",
+			__entry->ret, __entry->result, __entry->tzhandle,
+			__entry->op, __entry->counts)
+);
+
+TRACE_EVENT(process_log_info,
+	TP_PROTO(char *buf, uint32_t context_type, uint32_t tzhandle),
+	TP_ARGS(buf, context_type, tzhandle),
+	TP_STRUCT__entry(
+		__string(str,		buf)
+		__field(uint32_t,	context_type)
+		__field(uint32_t,	tzhandle)
+	),
+	TP_fast_assign(
+		__assign_str(str, buf);
+		__entry->context_type	= context_type;
+		__entry->tzhandle	= tzhandle;
+	),
+	TP_printk("%s context_type=%d tzhandle=0x%08x",
+			__get_str(str),
+			__entry->context_type, __entry->tzhandle)
+);
+
+TRACE_EVENT_CONDITION(smcinvoke_ioctl,
+	TP_PROTO(unsigned int cmd, long ret),
+	TP_ARGS(cmd, ret),
+	TP_CONDITION(ret),
+	TP_STRUCT__entry(
+		__field(unsigned int,	cmd)
+		__field(long,		ret)
+	),
+	TP_fast_assign(
+		__entry->cmd = cmd;
+		__entry->ret = ret;
+	),
+	TP_printk("cmd=%s ret=%ld",
+			__print_symbolic(__entry->cmd,
+				{SMCINVOKE_IOCTL_INVOKE_REQ,	"SMCINVOKE_IOCTL_INVOKE_REQ"},
+				{SMCINVOKE_IOCTL_ACCEPT_REQ,	"SMCINVOKE_IOCTL_ACCEPT_REQ"},
+				{SMCINVOKE_IOCTL_SERVER_REQ,	"SMCINVOKE_IOCTL_SERVER_REQ"},
+				{SMCINVOKE_IOCTL_ACK_LOCAL_OBJ,	"SMCINVOKE_IOCTL_ACK_LOCAL_OBJ"},
+				{SMCINVOKE_IOCTL_LOG,		"SMCINVOKE_IOCTL_LOG"}
+			), __entry->ret)
+);
+
+TRACE_EVENT(smcinvoke_create_bridge,
+	TP_PROTO(uint64_t shmbridge_handle, uint16_t mem_region_id),
+	TP_ARGS(shmbridge_handle, mem_region_id),
+	TP_STRUCT__entry(
+		__field(uint64_t,	shmbridge_handle)
+		__field(uint16_t,	mem_region_id)
+	),
+	TP_fast_assign(
+		__entry->shmbridge_handle	= shmbridge_handle;
+		__entry->mem_region_id		= mem_region_id;
+	),
+	TP_printk("created shm bridge handle %llu for mem_region_id %u",
+			__entry->shmbridge_handle, __entry->mem_region_id)
+);
+
+TRACE_EVENT(status,
+	TP_PROTO(const char *func, const char *status),
+	TP_ARGS(func, status),
+	TP_STRUCT__entry(
+		__string(str,	func)
+		__string(str2,	status)
+	),
+	TP_fast_assign(
+		__assign_str(str,	func);
+		__assign_str(str2,	status);
+	),
+	TP_printk("%s status=%s", __get_str(str), __get_str(str2))
+);
+
+TRACE_EVENT(process_accept_req_has_response,
+	TP_PROTO(pid_t pid, pid_t tgid),
+	TP_ARGS(pid, tgid),
+	TP_STRUCT__entry(
+		__field(pid_t,	pid)
+		__field(pid_t,	tgid)
+	),
+	TP_fast_assign(
+		__entry->pid	= pid;
+		__entry->tgid	= tgid;
+	),
+	TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
+);
+
+TRACE_EVENT(process_accept_req_ret,
+	TP_PROTO(pid_t pid, pid_t tgid, int ret),
+	TP_ARGS(pid, tgid, ret),
+	TP_STRUCT__entry(
+		__field(pid_t,	pid)
+		__field(pid_t,	tgid)
+		__field(int,	ret)
+	),
+	TP_fast_assign(
+		__entry->pid	= pid;
+		__entry->tgid	= tgid;
+		__entry->ret	= ret;
+	),
+	TP_printk("pid=0x%x tgid=0x%x ret=%d", __entry->pid, __entry->tgid, __entry->ret)
+);
+
+TRACE_EVENT(process_accept_req_placed,
+	TP_PROTO(pid_t pid, pid_t tgid),
+	TP_ARGS(pid, tgid),
+	TP_STRUCT__entry(
+		__field(pid_t,	pid)
+		__field(pid_t,	tgid)
+	),
+	TP_fast_assign(
+		__entry->pid	= pid;
+		__entry->tgid	= tgid;
+	),
+	TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
+);
+
+TRACE_EVENT(process_invoke_request_from_kernel_client,
+	TP_PROTO(int fd, struct file *filp, int f_count),
+	TP_ARGS(fd, filp, f_count),
+	TP_STRUCT__entry(
+		__field(int,		fd)
+		__field(struct file*,	filp)
+		__field(int,		f_count)
+	),
+	TP_fast_assign(
+		__entry->fd		= fd;
+		__entry->filp		= filp;
+		__entry->f_count	= f_count;
+	),
+	TP_printk("fd=%d, filp=%p, f_count=%d",
+			__entry->fd,
+			__entry->filp,
+			__entry->f_count)
+);
+
+TRACE_EVENT(smcinvoke_release_filp,
+	TP_PROTO(struct files_struct *files, struct file *filp,
+			int f_count, uint32_t context_type),
+	TP_ARGS(files, filp, f_count, context_type),
+	TP_STRUCT__entry(
+		__field(struct files_struct*,	files)
+		__field(struct file*,		filp)
+		__field(int,			f_count)
+		__field(uint32_t,		context_type)
+	),
+	TP_fast_assign(
+		__entry->files		= files;
+		__entry->filp		= filp;
+		__entry->f_count	= f_count;
+		__entry->context_type	= context_type;
+	),
+	TP_printk("files=%p, filp=%p, f_count=%u, cxt_type=%d",
+			__entry->files,
+			__entry->filp,
+			__entry->f_count,
+			__entry->context_type)
+);
+
+TRACE_EVENT(smcinvoke_release_from_kernel_client,
+	TP_PROTO(struct files_struct *files, struct file *filp, int f_count),
+	TP_ARGS(files, filp, f_count),
+	TP_STRUCT__entry(
+		__field(struct files_struct*,	files)
+		__field(struct file*,		filp)
+		__field(int,			f_count)
+	),
+	TP_fast_assign(
+		__entry->files		= files;
+		__entry->filp		= filp;
+		__entry->f_count	= f_count;
+	),
+	TP_printk("files=%p, filp=%p, f_count=%u",
+			__entry->files,
+			__entry->filp,
+			__entry->f_count)
+);
+
+TRACE_EVENT(smcinvoke_release,
+	TP_PROTO(struct files_struct *files, struct file *filp,
+			int f_count, void *private_data),
+	TP_ARGS(files, filp, f_count, private_data),
+	TP_STRUCT__entry(
+		__field(struct files_struct*,	files)
+		__field(struct file*,		filp)
+		__field(int,			f_count)
+		__field(void*,			private_data)
+	),
+	TP_fast_assign(
+		__entry->files		= files;
+		__entry->filp		= filp;
+		__entry->f_count	= f_count;
+		__entry->private_data	= private_data;
+	),
+	TP_printk("files=%p, filp=%p, f_count=%d, private_data=%p",
+			__entry->files,
+			__entry->filp,
+			__entry->f_count,
+			__entry->private_data)
+);
+
+#endif /* _TRACE_SMCINVOKE_H */
+/*
+ * Path must be relative to the location of the 'define_trace.h' header in the kernel.
+ * Define the path here if it is not already defined in the bazel file.
+ */
+#ifndef SMCINVOKE_TRACE_INCLUDE_PATH
+#define SMCINVOKE_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH SMCINVOKE_TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_smcinvoke
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
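For illustration only, not part of this change: as with any kernel tracepoint header, exactly one source file in the module defines CREATE_TRACE_POINTS before including this header so the event bodies are instantiated; every other user simply includes the header. A minimal sketch, assuming it sits alongside smcinvoke.c:

/* Illustrative sketch, not part of the patch. */
#define CREATE_TRACE_POINTS
#include "trace_smcinvoke.h"

static void example_emit_trace(uint32_t tzhandle, uint32_t op, uint32_t counts)
{
	/* Helper generated from TRACE_EVENT(process_invoke_req_tzhandle, ...) above */
	trace_process_invoke_req_tzhandle(tzhandle, op, counts);
}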

+ 58 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/include/uapi/linux/qti-smmu-proxy.h

@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __QTI_SMMU_PROXY_UAPI_H_
+#define __QTI_SMMU_PROXY_UAPI_H_
+
+#include <linux/types.h>
+
+#define QTI_SMMU_PROXY_CAMERA_CB 0
+#define QTI_SMMU_PROXY_DISPLAY_CB 1
+#define QTI_SMMU_PROXY_EVA_CB 2
+
+#define QTI_SMMU_PROXY_IOC_BASE 0x55
+
+struct csf_version {
+	__u32 arch_ver;
+	__u32 max_ver;
+	__u32 min_ver;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_GET_VERSION_IOCTL	_IOR(QTI_SMMU_PROXY_IOC_BASE, 0, \
+						     struct csf_version)
+
+struct smmu_proxy_acl_ctl {
+	__u32 dma_buf_fd;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_AC_LOCK_BUFFER	_IOW(QTI_SMMU_PROXY_IOC_BASE, 1, \
+					     struct smmu_proxy_acl_ctl)
+#define QTI_SMMU_PROXY_AC_UNLOCK_BUFFER	_IOW(QTI_SMMU_PROXY_IOC_BASE, 2, \
+					     struct smmu_proxy_acl_ctl)
+
+struct smmu_proxy_wipe_buf_ctl {
+	__u64 context_bank_id_array;
+	__u32 num_cb_ids;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_WIPE_BUFFERS	_IOW(QTI_SMMU_PROXY_IOC_BASE, 3, \
+					     struct smmu_proxy_wipe_buf_ctl)
+
+struct smmu_proxy_get_dma_buf_ctl {
+	/*
+	 * memparcel_hdl only needs to be 32-bit for Gunyah, but a 64-bit value
+	 * is needed to remain forward compatible with FF-A.
+	 */
+	__u64 memparcel_hdl;
+	__u32 dma_buf_fd;
+	__u32 padding;
+};
+
+#define QTI_SMMU_PROXY_GET_DMA_BUF	_IOWR(QTI_SMMU_PROXY_IOC_BASE, 4, \
+					      struct smmu_proxy_get_dma_buf_ctl)
+
+#endif /* __QTI_SMMU_PROXY_UAPI_H_ */
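For illustration only, not part of this change: a minimal user-space sketch of the version query, assuming the character device registered by the driver appears as /dev/qti-smmu-proxy and that this header is installed as <linux/qti-smmu-proxy.h>.

/* Illustrative sketch, not part of the patch; device node path and header
 * install path are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qti-smmu-proxy.h>

int main(void)
{
	struct csf_version ver = { 0 };
	int fd = open("/dev/qti-smmu-proxy", O_RDONLY);

	if (fd < 0)
		return 1;

	/* _IOR ioctl: the driver fills the struct and copies it back to us */
	if (ioctl(fd, QTI_SMMU_PROXY_GET_VERSION_IOCTL, &ver) == 0)
		printf("CSF version %u.%u.%u\n", ver.arch_ver, ver.max_ver, ver.min_ver);

	close(fd);
	return 0;
}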

+ 18 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/linux/qti-smmu-proxy.h

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef __QTI_SMMU_PROXY_H_
+#define __QTI_SMMU_PROXY_H_
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/align.h>
+
+#include <smmu-proxy/include/uapi/linux/qti-smmu-proxy.h>
+
+#define SMMU_PROXY_MEM_ALIGNMENT (1 << 21)
+
+int smmu_proxy_get_csf_version(struct csf_version *csf_version);
+
+#endif /* __QTI_SMMU_PROXY_H_ */
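For illustration only, not part of this change: a kernel-side sketch of gating on the CSF version through the exported helper, mirroring the 2.5.x checks done later in the sender probe path.

/* Illustrative sketch, not part of the patch; the include path mirrors the
 * one used by qti-smmu-proxy-common.h.
 */
#include <linux/types.h>
#include "linux/qti-smmu-proxy.h"

static bool example_csf_is_2_5(void)
{
	struct csf_version ver;

	if (smmu_proxy_get_csf_version(&ver))
		return false;

	return ver.arch_ver == 2 && ver.max_ver == 5;
}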

+ 113 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-common.c

@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include "qti-smmu-proxy-common.h"
+#include "smcinvoke_object.h"
+#include "../include/linux/ITrustedCameraDriver.h"
+#include "../include/linux/CTrustedCameraDriver.h"
+#include "../include/linux/IClientEnv.h"
+
+#define SMMU_PROXY_MAX_DEVS 1
+static dev_t smmu_proxy_dev_no;
+static struct class *smmu_proxy_class;
+static struct cdev smmu_proxy_char_dev;
+
+static struct csf_version cached_csf_version;
+
+int smmu_proxy_get_csf_version(struct csf_version *csf_version)
+{
+	int ret;
+	struct Object client_env = {0};
+	struct Object sc_object;
+
+	/* Assumption: if cached_csf_version.arch_ver != 0, the other fields are set as well */
+	if (cached_csf_version.arch_ver != 0) {
+		csf_version->arch_ver = cached_csf_version.arch_ver;
+		csf_version->max_ver = cached_csf_version.max_ver;
+		csf_version->min_ver = cached_csf_version.min_ver;
+
+		return 0;
+	}
+
+	ret = get_client_env_object(&client_env);
+	if (ret) {
+		pr_err("%s: Failed to get env object rc: %d\n", __func__,
+		       ret);
+		return ret;
+	}
+
+	ret = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
+	if (ret) {
+		pr_err("%s: Failed to get seccam object rc: %d\n", __func__,
+		       ret);
+		Object_release(client_env);
+		return ret;
+	}
+
+	ret = ITrustedCameraDriver_getVersion(sc_object, &csf_version->arch_ver,
+					      &csf_version->max_ver,
+					      &csf_version->min_ver);
+
+	Object_release(sc_object);
+	Object_release(client_env);
+
+	/*
+	 * Once we set cached_csf_version.arch_ver, concurrent callers will get
+	 * the cached value.
+	 */
+	cached_csf_version.min_ver = csf_version->min_ver;
+	cached_csf_version.max_ver = csf_version->max_ver;
+	cached_csf_version.arch_ver = csf_version->arch_ver;
+
+	return ret;
+}
+EXPORT_SYMBOL(smmu_proxy_get_csf_version);
+
+int smmu_proxy_create_dev(const struct file_operations *fops)
+{
+	int ret;
+	struct device *class_dev;
+
+	ret = alloc_chrdev_region(&smmu_proxy_dev_no, 0, SMMU_PROXY_MAX_DEVS,
+				  "qti-smmu-proxy");
+	if (ret < 0)
+		return ret;
+
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+	smmu_proxy_class = class_create("qti-smmu-proxy");
+#else
+	smmu_proxy_class = class_create(THIS_MODULE, "qti-smmu-proxy");
+#endif
+	if (IS_ERR(smmu_proxy_class)) {
+		ret = PTR_ERR(smmu_proxy_class);
+		goto err_class_create;
+	}
+
+	cdev_init(&smmu_proxy_char_dev, fops);
+	ret = cdev_add(&smmu_proxy_char_dev, smmu_proxy_dev_no,
+		       SMMU_PROXY_MAX_DEVS);
+	if (ret < 0)
+		goto err_cdev_add;
+
+	class_dev = device_create(smmu_proxy_class, NULL, smmu_proxy_dev_no, NULL,
+				  "qti-smmu-proxy");
+	if (IS_ERR(class_dev)) {
+		ret = PTR_ERR(class_dev);
+		goto err_dev_create;
+	}
+
+	return 0;
+
+err_dev_create:
+	cdev_del(&smmu_proxy_char_dev);
+err_cdev_add:
+	class_destroy(smmu_proxy_class);
+err_class_create:
+	unregister_chrdev_region(smmu_proxy_dev_no, SMMU_PROXY_MAX_DEVS);
+
+	return ret;
+}
+

+ 30 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-common.h

@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QTI_SMMU_PROXY_COMMON_H_
+#define __QTI_SMMU_PROXY_COMMON_H_
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/cdev.h>
+#include <linux/dma-buf.h>
+
+#include <linux/mem-buf.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/gunyah/gh_msgq.h>
+#include "qti-smmu-proxy-msgq.h"
+#include "linux/qti-smmu-proxy.h"
+
+union smmu_proxy_ioctl_arg {
+	struct csf_version csf_version;
+	struct smmu_proxy_acl_ctl acl_ctl;
+	struct smmu_proxy_wipe_buf_ctl wipe_buf_ctl;
+	struct smmu_proxy_get_dma_buf_ctl get_dma_buf_ctl;
+};
+
+int smmu_proxy_create_dev(const struct file_operations *fops);
+
+#endif /* __QTI_SMMU_PROXY_COMMON_H_ */

+ 107 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-msgq.h

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef SMMU_PROXY_MSGQ_H
+#define SMMU_PROXY_MSGQ_H
+
+#include <linux/gunyah/gh_rm_drv.h>
+
+
+/**
+ * enum smmu_proxy_msg_type: Message types used by the SMMU proxy driver for
+ * communication.
+ * @SMMU_PROXY_MAP: The message is a request to map memory into the VM's
+ * SMMU.
+ * @SMMU_PROXY_MAP_RESP: The message is a response from a remote VM to a
+ * mapping request issued by the receiving VM
+ * @SMMU_PROXY_UNMAP: The message is a request to unmap some previously
+ * SMMU-mapped memory from the VM
+ * @SMMU_PROXY_UNMAP_RESP: The message is a response from a remote VM to an
+ * unmapping request issued by the receiving VM
+ * @SMMU_PROXY_ERR_RESP: The message is a response from a remote VM to give
+ * a generic error response for a prior message sent to the remote VM
+ */
+enum smmu_proxy_msg_type {
+	SMMU_PROXY_MAP,
+	SMMU_PROXY_MAP_RESP,
+	SMMU_PROXY_UNMAP,
+	SMMU_PROXY_UNMAP_RESP,
+	SMMU_PROXY_ERR_RESP,
+	SMMU_PROXY_MSG_MAX,
+};
+
+/**
+ * struct smmu_proxy_msg_hdr: The header for SMMU proxy messages
+ * @msg_type: The type of message.
+ * @msg_size: The size of message.
+ */
+struct smmu_proxy_msg_hdr {
+	u32 msg_type;
+	u32 msg_size;
+} __packed;
+
+/**
+ * struct smmu_proxy_resp_hdr: The header for responses to SMMU proxy messages
+ * @msg_type: The type of message.
+ * @msg_size: The size of message.
+ * @ret: Return code from remote VM
+ */
+struct smmu_proxy_resp_hdr {
+	u32 msg_type;
+	u32 msg_size;
+	s32 ret;
+} __packed;
+
+/**
+ * struct smmu_proxy_map_req: The message format for an SMMU mapping request from
+ * another VM.
+ * @hdr: Message header
+ * @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
+ * of the relevant VM
+ * @cb_id: Context bank ID that we will map the memory associated with @hdl to
+ * @acl_desc: A GH ACL descriptor that describes the VMIDs that will be
+ * accessing the memory, as well as what permissions each VMID will have.
+ */
+struct smmu_proxy_map_req {
+	struct smmu_proxy_msg_hdr hdr;
+	u32 hdl;
+	u32 cb_id;
+	struct gh_acl_desc acl_desc;
+} __packed;
+
+/**
+ * struct smmu_proxy_map_resp: The message format for an SMMU mapping
+ * request response.
+ * @hdr: Response header
+ * @iova: IOVA of mapped memory
+ * @mapping_len: Length of the IOMMU IOVA mapping
+ */
+struct smmu_proxy_map_resp {
+	struct smmu_proxy_resp_hdr hdr;
+	u64 iova;
+	u64 mapping_len;
+} __packed;
+
+/**
+ * struct smmu_proxy_unmap_req: The message format for an SMMU unmapping request from
+ * another VM.
+ * @hdr: Message header
+ * @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
+ * of the relevant VM
+ */
+struct smmu_proxy_unmap_req {
+	struct smmu_proxy_msg_hdr hdr;
+	u32 hdl;
+} __packed;
+
+/**
+ * struct smmu_proxy_unmap_resp: The message format for an SMMU unmapping
+ * request response.
+ * @hdr: Response header
+ */
+struct smmu_proxy_unmap_resp {
+	struct smmu_proxy_resp_hdr hdr;
+} __packed;
+
+#endif /* SMMU_PROXY_MSGQ_H */
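For illustration only, not part of this change: because smmu_proxy_map_req ends in a variable-length gh_acl_desc, the sender sizes the message with offsetof() over the last ACL entry and the receiver validates that size against the bytes actually delivered by the message queue. A minimal sketch of that computation:

/* Illustrative sketch, not part of the patch. */
#include <linux/stddef.h>
#include "qti-smmu-proxy-msgq.h"

static size_t example_map_req_size(u32 n_acl_entries)
{
	return offsetof(struct smmu_proxy_map_req,
			acl_desc.acl_entries[n_acl_entries]);
}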

+ 323 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-pvm.c

@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "qti-smmu-proxy-common.h"
+
+#include <linux/qti-smmu-proxy-callbacks.h>
+#include <linux/qcom-dma-mapping.h>
+#include <linux/of.h>
+
+static void *msgq_hdl;
+
+DEFINE_MUTEX(sender_mutex);
+
+static const struct file_operations smmu_proxy_dev_fops;
+
+int smmu_proxy_unmap(void *data)
+{
+	struct dma_buf *dmabuf;
+	void *buf;
+	size_t size;
+	int ret;
+	struct smmu_proxy_unmap_req *req;
+	struct smmu_proxy_unmap_resp *resp;
+
+	mutex_lock(&sender_mutex);
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		pr_err("%s: Failed to allocate memory!\n", __func__);
+		goto out;
+	}
+
+	req = buf;
+
+	dmabuf = data;
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	req->hdr.msg_type = SMMU_PROXY_UNMAP;
+	req->hdr.msg_size = sizeof(*req);
+
+	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	/*
+	 * No need to validate size -  gh_msgq_recv() ensures that sizeof(*resp) <
+	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
+	 */
+	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	resp = buf;
+	if (resp->hdr.ret) {
+		ret = resp->hdr.ret;
+		pr_err("%s: Unmap call failed on remote VM, rc: %d\n", __func__,
+		       resp->hdr.ret);
+	}
+
+free_buf:
+	kfree(buf);
+out:
+	mutex_unlock(&sender_mutex);
+
+	return ret;
+}
+
+int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
+		   struct dma_buf *dmabuf)
+{
+	void *buf;
+	size_t size;
+	int ret = 0;
+	int n_acl_entries, i;
+	int vmids[2] = { VMID_TVM, VMID_OEMVM };
+	int perms[2] = { PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+	struct csf_version csf_version;
+	struct mem_buf_lend_kernel_arg arg = {0};
+	struct smmu_proxy_map_req *req;
+	struct smmu_proxy_map_resp *resp;
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		return ret;
+	}
+
+	/*
+	 * We enter this function iff the CSF version is 2.5.* . If CSF 2.5.1
+	 * is in use, we set n_acl_entries to two, in order to assign this
+	 * memory to the TVM and OEM VM. If CSF 2.5.0 is in use, we just assign
+	 * it to the TVM.
+	 */
+	n_acl_entries = csf_version.min_ver == 1 ? 2 : 1;
+
+	mutex_lock(&sender_mutex);
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (mem_buf_dma_buf_exclusive_owner(dmabuf)) {
+		arg.vmids = vmids;
+		arg.perms = perms;
+		arg.nr_acl_entries = n_acl_entries;
+
+		ret = mem_buf_lend(dmabuf, &arg);
+		if (ret) {
+			pr_err("%s: Failed to lend buf rc: %d\n", __func__, ret);
+			goto free_buf;
+		}
+	}
+
+	/* Prepare the message */
+	req = buf;
+	req->acl_desc.n_acl_entries = n_acl_entries;
+	for (i = 0; i < n_acl_entries; i++) {
+		req->acl_desc.acl_entries[i].vmid = vmids[i];
+		req->acl_desc.acl_entries[i].perms = perms[i];
+	}
+
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	ret = of_property_read_u32(client_dev->of_node,
+				   "qti,smmu-proxy-cb-id",
+				   &req->cb_id);
+	if (ret) {
+		dev_err(client_dev, "%s: Err reading 'qti,smmu-proxy-cb-id' rc: %d\n",
+			__func__, ret);
+		goto free_buf;
+	}
+
+	req->hdr.msg_type = SMMU_PROXY_MAP;
+	req->hdr.msg_size = offsetof(struct smmu_proxy_map_req,
+				acl_desc.acl_entries[n_acl_entries]);
+
+	ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	/*
+	 * No need to validate size -  gh_msgq_recv() ensures that sizeof(*resp) <
+	 * GH_MSGQ_MAX_MSG_SIZE_BYTES
+	 */
+	ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
+	if (ret < 0) {
+		pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	resp = buf;
+
+	if (resp->hdr.ret) {
+		ret = resp->hdr.ret;
+		pr_err_ratelimited("%s: Map call failed on remote VM, rc: %d\n", __func__,
+				   resp->hdr.ret);
+		goto free_buf;
+	}
+
+	ret = mem_buf_dma_buf_set_destructor(dmabuf, smmu_proxy_unmap, dmabuf);
+	if (ret) {
+		pr_err_ratelimited("%s: Failed to set vmperm destructor, rc: %d\n",
+				   __func__, ret);
+		goto free_buf;
+	}
+
+	sg_dma_address(proxy_iova->sgl) = resp->iova;
+	sg_dma_len(proxy_iova->sgl) = resp->mapping_len;
+	/*
+	 * We set the number of entries to one here, as we only allow the mapping to go
+	 * through on the TVM if the sg_table returned by dma_buf_map_attachment has one
+	 * entry.
+	 */
+	proxy_iova->nents = 1;
+
+free_buf:
+	kfree(buf);
+out:
+	mutex_unlock(&sender_mutex);
+
+	return ret;
+}
+
+void smmu_proxy_unmap_nop(struct device *client_dev, struct sg_table *table,
+			  struct dma_buf *dmabuf)
+{
+
+}
+
+
+static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	unsigned int dir = _IOC_DIR(cmd);
+	union smmu_proxy_ioctl_arg ioctl_arg;
+	int ret;
+
+	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
+		return -EINVAL;
+
+	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	if (!(dir & _IOC_WRITE))
+		memset(&ioctl_arg, 0, sizeof(ioctl_arg));
+
+	switch (cmd) {
+	case QTI_SMMU_PROXY_GET_VERSION_IOCTL:
+	{
+		struct csf_version *csf_version =
+			&ioctl_arg.csf_version;
+
+		ret = smmu_proxy_get_csf_version(csf_version);
+		if (ret)
+			return ret;
+
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &ioctl_arg,
+				 _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations smmu_proxy_dev_fops = {
+	.unlocked_ioctl = smmu_proxy_dev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+};
+
+static int sender_probe_handler(struct platform_device *pdev)
+{
+	int ret;
+	struct csf_version csf_version;
+
+	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
+	if (IS_ERR(msgq_hdl)) {
+		ret = PTR_ERR(msgq_hdl);
+		pr_err("%s: Queue registration failed rc: %ld!\n", __func__, PTR_ERR(msgq_hdl));
+		return ret;
+	}
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		pr_err("%s: Failed to get CSF version rc: %d\n", __func__, ret);
+		goto free_msgq;
+	}
+
+	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
+		ret = qti_smmu_proxy_register_callbacks(NULL, NULL);
+	} else if (csf_version.arch_ver == 2 && csf_version.max_ver == 5) {
+		ret = qti_smmu_proxy_register_callbacks(smmu_proxy_map, smmu_proxy_unmap_nop);
+	} else {
+		pr_err("%s: Invalid CSF version: %d.%d\n", __func__, csf_version.arch_ver,
+			csf_version.max_ver);
+		ret = -EINVAL;
+		goto free_msgq;
+	}
+
+	if (ret) {
+		pr_err("%s: Failed to set SMMU proxy callbacks rc: %d\n", __func__, ret);
+		goto free_msgq;
+	}
+
+	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
+	if (ret) {
+		pr_err("%s: Failed to create character device rc: %d\n", __func__,
+		       ret);
+		goto set_callbacks_null;
+	}
+
+	return 0;
+
+set_callbacks_null:
+	qti_smmu_proxy_register_callbacks(NULL, NULL);
+free_msgq:
+	gh_msgq_unregister(msgq_hdl);
+	return ret;
+}
+
+static const struct of_device_id smmu_proxy_match_table[] = {
+	{.compatible = "smmu-proxy-sender"},
+	{},
+};
+
+static struct platform_driver smmu_proxy_driver = {
+	.probe = sender_probe_handler,
+	.driver = {
+		.name = "qti-smmu-proxy",
+		.of_match_table = smmu_proxy_match_table,
+	},
+};
+
+int __init init_smmu_proxy_driver(void)
+{
+	return platform_driver_register(&smmu_proxy_driver);
+}
+module_init(init_smmu_proxy_driver);
+
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");

+ 775 - 0
qcom/opensource/securemsm-kernel/smmu-proxy/qti-smmu-proxy-tvm.c

@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/kthread.h>
+
+#include <linux/qcom-iommu-util.h>
+#include <dt-bindings/arm/msm/qti-smmu-proxy-dt-ids.h>
+#include "qti-smmu-proxy-common.h"
+
+#define RECEIVER_COMPAT_STR "smmu-proxy-receiver"
+#define CB_COMPAT_STR "smmu-proxy-cb"
+
+static void *msgq_hdl;
+
+struct smmu_proxy_buffer_cb_info {
+	bool mapped;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sg_table;
+};
+
+struct smmu_proxy_buffer_state {
+	bool locked;
+	struct smmu_proxy_buffer_cb_info cb_info[QTI_SMMU_PROXY_CB_IDS_LEN];
+	struct dma_buf *dmabuf;
+};
+
+static DEFINE_MUTEX(buffer_state_lock);
+static DEFINE_XARRAY(buffer_state_arr);
+
+static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
+struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
+
+struct task_struct *receiver_msgq_handler_thread;
+
+static int zero_dma_buf(struct dma_buf *dmabuf)
+{
+	int ret;
+	struct iosys_map vmap_struct = {0};
+
+	ret = dma_buf_vmap(dmabuf, &vmap_struct);
+	if (ret) {
+		pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* Use DMA_TO_DEVICE since we are not reading anything */
+	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
+	if (ret) {
+		pr_err("%s: dma_buf_begin_cpu_access() failed with %d\n", __func__, ret);
+		goto unmap;
+	}
+
+	memset(vmap_struct.vaddr, 0, dmabuf->size);
+	ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
+	if (ret)
+		pr_err("%s: dma_buf_end_cpu_access() failed with %d\n", __func__, ret);
+unmap:
+	dma_buf_vunmap(dmabuf, &vmap_struct);
+
+	if (ret)
+		pr_err("%s: Failed to properly zero the DMA-BUF\n", __func__);
+
+	return ret;
+}
+
+static int iommu_unmap_and_relinquish(u32 hdl)
+{
+	int cb_id, ret = 0;
+	struct smmu_proxy_buffer_state *buf_state;
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, hdl);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buf_state->locked) {
+		pr_err("%s: handle 0x%x is locked!\n", __func__, hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (cb_id = 0; cb_id < QTI_SMMU_PROXY_CB_IDS_LEN; cb_id++) {
+		if (buf_state->cb_info[cb_id].mapped) {
+			dma_buf_unmap_attachment(buf_state->cb_info[cb_id].attachment,
+						 buf_state->cb_info[cb_id].sg_table,
+						 DMA_BIDIRECTIONAL);
+			dma_buf_detach(buf_state->dmabuf,
+				       buf_state->cb_info[cb_id].attachment);
+			buf_state->cb_info[cb_id].mapped = false;
+
+			/* If nothing left is mapped for this CB, unprogram its SMR */
+			cb_map_counts[cb_id]--;
+			if (!cb_map_counts[cb_id]) {
+				ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
+				if (ret) {
+					pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
+					       __func__, cb_id, ret);
+					break;
+				}
+			}
+		}
+	}
+
+	ret = zero_dma_buf(buf_state->dmabuf);
+	if (!ret) {
+		dma_buf_put(buf_state->dmabuf);
+		flush_delayed_fput();
+	}
+
+	xa_erase(&buffer_state_arr, hdl);
+	kfree(buf_state);
+out:
+	mutex_unlock(&buffer_state_lock);
+
+	return ret;
+}
+
+static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
+{
+	struct smmu_proxy_unmap_resp *resp;
+	int ret = 0;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = iommu_unmap_and_relinquish(req->hdl);
+
+	resp->hdr.msg_type = SMMU_PROXY_UNMAP_RESP;
+	resp->hdr.msg_size = sizeof(*resp);
+	resp->hdr.ret = ret;
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
+	if (ret < 0)
+		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
+	else
+		pr_debug("%s: response to mapping request sent\n", __func__);
+
+	kfree(resp);
+
+	return ret;
+}
+
+static inline
+struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
+					u32 cb_id)
+{
+	int ret;
+	struct dma_buf *dmabuf;
+	bool new_buf = false;
+	struct smmu_proxy_buffer_state *buf_state;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *table;
+
+	if (cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+		pr_err("%s: CB ID %d too large\n", __func__, cb_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!cb_devices[cb_id]) {
+		pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, retrieve_arg->memparcel_hdl);
+	if (buf_state) {
+		if (buf_state->cb_info[cb_id].mapped) {
+			table = buf_state->cb_info[cb_id].sg_table;
+			goto unlock;
+		}
+		if (buf_state->locked) {
+			pr_err("%s: handle 0x%x is locked!\n", __func__,
+			       retrieve_arg->memparcel_hdl);
+			ret = -EINVAL;
+			goto unlock_err;
+		}
+
+		dmabuf = buf_state->dmabuf;
+	} else {
+		new_buf = true;
+		dmabuf = mem_buf_retrieve(retrieve_arg);
+		if (IS_ERR(dmabuf)) {
+			ret = PTR_ERR(dmabuf);
+			pr_err("%s: Failed to retrieve DMA-BUF rc: %d\n", __func__, ret);
+			goto unlock_err;
+		}
+
+		ret = zero_dma_buf(dmabuf);
+		if (ret) {
+			pr_err("%s: Failed to zero the DMA-BUF rc: %d\n", __func__, ret);
+			goto free_buf;
+		}
+
+		buf_state = kzalloc(sizeof(*buf_state), GFP_KERNEL);
+		if (!buf_state) {
+			pr_err("%s: Unable to allocate memory for buf_state\n",
+			       __func__);
+			ret = -ENOMEM;
+			goto free_buf;
+		}
+
+		buf_state->dmabuf = dmabuf;
+	}
+
+	attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
+	if (IS_ERR(attachment)) {
+		ret = PTR_ERR(attachment);
+		pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
+		goto free_buf_state;
+	}
+
+	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+	if (IS_ERR(table)) {
+		ret = PTR_ERR(table);
+		pr_err("%s: Failed to map rc: %d\n", __func__, ret);
+		goto detach;
+	}
+
+	if (table->nents != 1) {
+		ret = -EINVAL;
+		pr_err("%s: Buffer not mapped as one segment!\n", __func__);
+		goto unmap;
+	}
+
+	buf_state->cb_info[cb_id].mapped = true;
+	buf_state->cb_info[cb_id].attachment = attachment;
+	buf_state->cb_info[cb_id].sg_table = table;
+
+	if (!cb_map_counts[cb_id]) {
+		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
+		if (ret) {
+			pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
+			       cb_id, ret);
+			goto unmap;
+		}
+	}
+	cb_map_counts[cb_id]++;
+
+	ret = xa_err(xa_store(&buffer_state_arr, retrieve_arg->memparcel_hdl, buf_state,
+		     GFP_KERNEL));
+	if (ret < 0) {
+		pr_err("%s: Failed to store new buffer in xarray rc: %d\n", __func__,
+		       ret);
+		goto dec_cb_map_count;
+	}
+
+unlock:
+	mutex_unlock(&buffer_state_lock);
+
+	return table;
+
+dec_cb_map_count:
+	cb_map_counts[cb_id]--;
+	if (!cb_map_counts[cb_id]) {
+		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
+		if (ret)
+			pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
+			       __func__, cb_id, ret);
+	}
+unmap:
+	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
+detach:
+	dma_buf_detach(dmabuf, attachment);
+free_buf_state:
+	if (new_buf)
+		kfree(buf_state);
+free_buf:
+	if (new_buf)
+		dma_buf_put(dmabuf);
+unlock_err:
+	mutex_unlock(&buffer_state_lock);
+
+	return ERR_PTR(ret);
+}
+
+static int process_map_request(struct smmu_proxy_map_req *req, size_t size)
+{
+	struct smmu_proxy_map_resp *resp;
+	int ret = 0;
+	u32 n_acl_entries = req->acl_desc.n_acl_entries;
+	size_t map_req_len = offsetof(struct smmu_proxy_map_req,
+				      acl_desc.acl_entries[n_acl_entries]);
+	struct mem_buf_retrieve_kernel_arg retrieve_arg = {0};
+	int i;
+	struct sg_table *table;
+
+	/*
+	 * Last entry of smmu_proxy_map_req is an array of arbitrary length.
+	 * Validate that the number of entries fits within the buffer given
+	 * to us by the message queue.
+	 */
+	if (map_req_len > size) {
+		pr_err("%s: Reported size of smmu_proxy_map_request (%zu bytes) greater than message size given by message queue (%zu bytes)\n",
+		       __func__, map_req_len, size);
+		return -EINVAL;
+	}
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return -ENOMEM;
+	}
+
+	retrieve_arg.vmids = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.vmids), GFP_KERNEL);
+	if (!retrieve_arg.vmids) {
+		ret = -ENOMEM;
+		goto free_resp;
+	}
+
+	retrieve_arg.perms = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.perms), GFP_KERNEL);
+	if (!retrieve_arg.perms) {
+		ret = -ENOMEM;
+		goto free_vmids;
+	}
+
+	retrieve_arg.fd_flags = O_RDWR;
+	retrieve_arg.memparcel_hdl = req->hdl;
+	retrieve_arg.sender_vmid = VMID_HLOS;
+	retrieve_arg.nr_acl_entries = n_acl_entries;
+
+	for (i = 0; i < n_acl_entries; i++) {
+		retrieve_arg.vmids[i] = req->acl_desc.acl_entries[i].vmid;
+		retrieve_arg.perms[i] = req->acl_desc.acl_entries[i].perms;
+	}
+
+	table = retrieve_and_iommu_map(&retrieve_arg, req->cb_id);
+	if (IS_ERR(table)) {
+		ret = PTR_ERR(table);
+		goto free_perms;
+	}
+
+	resp->hdr.msg_type = SMMU_PROXY_MAP_RESP;
+	resp->hdr.msg_size = sizeof(*resp);
+	resp->hdr.ret = ret;
+	resp->iova = sg_dma_address(table->sgl);
+	resp->mapping_len = sg_dma_len(table->sgl);
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
+	if (ret < 0) {
+		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
+		iommu_unmap_and_relinquish(req->hdl);
+	} else {
+		pr_debug("%s: response to mapping request sent\n", __func__);
+	}
+
+free_perms:
+	kfree(retrieve_arg.perms);
+free_vmids:
+	kfree(retrieve_arg.vmids);
+free_resp:
+	kfree(resp);
+
+	return ret;
+}
+
+static void smmu_proxy_process_msg(void *buf, size_t size)
+{
+	struct smmu_proxy_msg_hdr *msg_hdr = buf;
+	struct smmu_proxy_resp_hdr *resp;
+	int ret = -EINVAL;
+
+	if (size < sizeof(*msg_hdr) || msg_hdr->msg_size != size) {
+		pr_err("%s: message received is not of a proper size: 0x%zx, 0x%x\n",
+		       __func__, size, msg_hdr->msg_size);
+		goto handle_err;
+	}
+
+	switch (msg_hdr->msg_type) {
+	case SMMU_PROXY_MAP:
+		ret = process_map_request(buf, size);
+		break;
+	case SMMU_PROXY_UNMAP:
+		ret = process_unmap_request(buf, size);
+		break;
+	default:
+		pr_err("%s: received message of unknown type: %d\n", __func__,
+		       msg_hdr->msg_type);
+	}
+
+	if (!ret)
+		return;
+
+handle_err:
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		pr_err("%s: Failed to allocate memory for response\n", __func__);
+		return;
+	}
+
+	resp->msg_type = SMMU_PROXY_ERR_RESP;
+	resp->msg_size = sizeof(*resp);
+	resp->ret = ret;
+
+	ret = gh_msgq_send(msgq_hdl, resp, resp->msg_size, 0);
+	if (ret < 0)
+		pr_err("%s: failed to send error response rc: %d\n", __func__, ret);
+	else
+		pr_debug("%s: response to mapping request sent\n", __func__);
+
+	kfree(resp);
+
+}
+
+static int receiver_msgq_handler(void *msgq_hdl)
+{
+	void *buf;
+	size_t size;
+	int ret;
+
+	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	while (!kthread_should_stop()) {
+		ret = gh_msgq_recv(msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
+		if (ret < 0) {
+			pr_err_ratelimited("%s failed to receive message rc: %d\n", __func__, ret);
+		} else {
+			smmu_proxy_process_msg(buf, size);
+		}
+	}
+
+	kfree(buf);
+
+	return 0;
+}
+
+static int smmu_proxy_ac_lock_toggle(int dma_buf_fd, bool lock)
+{
+	int ret = 0;
+	struct smmu_proxy_buffer_state *buf_state;
+	struct dma_buf *dmabuf;
+	u32 handle;
+
+	dmabuf = dma_buf_get(dma_buf_fd);
+	if (IS_ERR(dmabuf)) {
+		pr_err("%s: unable to get dma-buf from FD %d, rc: %ld\n", __func__,
+		       dma_buf_fd, PTR_ERR(dmabuf));
+		return PTR_ERR(dmabuf);
+	}
+
+	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &handle);
+	if (ret) {
+		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
+		goto free_buf;
+	}
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, handle);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, handle);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (buf_state->locked == lock) {
+		pr_err("%s: handle 0x%x already %s!\n", __func__, handle,
+		       lock ? "locked" : "unlocked");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	buf_state->locked = lock;
+out:
+	mutex_unlock(&buffer_state_lock);
+free_buf:
+	dma_buf_put(dmabuf);
+
+	return ret;
+}
+
+/*
+ * Iterate over all buffers mapped to context bank @context_bank_id, and zero
+ * out the buffers. If there is a single error for any buffer, we bail out with
+ * an error and disregard the rest of the buffers mapped to @context_bank_id.
+ */
+int smmu_proxy_clear_all_buffers(void __user *context_bank_id_array,
+				 __u32 num_cb_ids)
+{
+	unsigned long handle;
+	struct smmu_proxy_buffer_state *buf_state;
+	__u32 cb_ids[QTI_SMMU_PROXY_CB_IDS_LEN];
+	int i, ret = 0;
+	bool found_mapped_cb;
+
+	/* Checking this allows us to keep cb_id_arr fixed in length */
+	if (num_cb_ids > QTI_SMMU_PROXY_CB_IDS_LEN) {
+		pr_err("%s: Invalid number of CB IDs: %u\n", __func__, num_cb_ids);
+		return -EINVAL;
+	}
+
+	ret = copy_struct_from_user(&cb_ids, sizeof(cb_ids), context_bank_id_array,
+				    sizeof(cb_ids));
+	if (ret) {
+		pr_err("%s: Failed to get CB IDs from user space rc %d\n", __func__, ret);
+		return ret;
+	}
+
+	for (i = 0; i < num_cb_ids; i++) {
+		if (cb_ids[i] >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+			pr_err("%s: Invalid CB ID of %u at pos %d\n", __func__, cb_ids[i], i);
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&buffer_state_lock);
+	xa_for_each(&buffer_state_arr, handle, buf_state) {
+		found_mapped_cb = false;
+		for (i = 0; i < num_cb_ids; i++) {
+			if (buf_state->cb_info[cb_ids[i]].mapped) {
+				found_mapped_cb = true;
+				break;
+			}
+		}
+		if (!found_mapped_cb)
+			continue;
+
+		ret = zero_dma_buf(buf_state->dmabuf);
+		if (ret) {
+			pr_err("%s: zero_dma_buf() failed with %d\n", __func__, ret);
+			break;
+		}
+	}
+
+	mutex_unlock(&buffer_state_lock);
+	return ret;
+}
+
+static int smmu_proxy_get_dma_buf(struct smmu_proxy_get_dma_buf_ctl *get_dma_buf_ctl)
+{
+	struct smmu_proxy_buffer_state *buf_state;
+	int fd, ret = 0;
+
+	mutex_lock(&buffer_state_lock);
+	buf_state = xa_load(&buffer_state_arr, get_dma_buf_ctl->memparcel_hdl);
+	if (!buf_state) {
+		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__,
+		       get_dma_buf_ctl->memparcel_hdl);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	get_dma_buf(buf_state->dmabuf);
+	fd = dma_buf_fd(buf_state->dmabuf, O_CLOEXEC);
+	if (fd < 0) {
+		ret = fd;
+		pr_err("%s: Failed to install FD for dma-buf rc: %d\n", __func__,
+		       ret);
+		dma_buf_put(buf_state->dmabuf);
+	} else {
+		get_dma_buf_ctl->dma_buf_fd = fd;
+	}
+out:
+	mutex_unlock(&buffer_state_lock);
+
+	return ret;
+}
+
+static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg)
+{
+	unsigned int dir = _IOC_DIR(cmd);
+	union smmu_proxy_ioctl_arg ioctl_arg;
+	int ret;
+
+	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
+		return -EINVAL;
+
+	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	if (!(dir & _IOC_WRITE))
+		memset(&ioctl_arg, 0, sizeof(ioctl_arg));
+
+	switch (cmd) {
+	case QTI_SMMU_PROXY_AC_LOCK_BUFFER:
+	{
+		struct smmu_proxy_acl_ctl *acl_ctl =
+			&ioctl_arg.acl_ctl;
+
+		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, true);
+		if (ret)
+			return ret;
+
+		break;
+	}
+	case QTI_SMMU_PROXY_AC_UNLOCK_BUFFER:
+	{
+		struct smmu_proxy_acl_ctl *acl_ctl =
+			&ioctl_arg.acl_ctl;
+
+		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, false);
+		if (ret)
+			return ret;
+
+		break;
+	}
+	case QTI_SMMU_PROXY_WIPE_BUFFERS:
+	{
+		struct smmu_proxy_wipe_buf_ctl *wipe_buf_ctl =
+			&ioctl_arg.wipe_buf_ctl;
+
+		ret = smmu_proxy_clear_all_buffers((void *) wipe_buf_ctl->context_bank_id_array,
+						   wipe_buf_ctl->num_cb_ids);
+		break;
+	}
+	case QTI_SMMU_PROXY_GET_DMA_BUF:
+	{
+		ret = smmu_proxy_get_dma_buf(&ioctl_arg.get_dma_buf_ctl);
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &ioctl_arg,
+				 _IOC_SIZE(cmd)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static const struct file_operations smmu_proxy_dev_fops = {
+	.unlocked_ioctl = smmu_proxy_dev_ioctl,
+	.compat_ioctl = compat_ptr_ioctl,
+};
+
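+/*
+ * Illustrative user-space call sequence for QTI_SMMU_PROXY_GET_DMA_BUF. This
+ * is a sketch only; the device node path and helper below are assumptions and
+ * are not defined by this driver:
+ *
+ *	struct smmu_proxy_get_dma_buf_ctl ctl = { .memparcel_hdl = hdl };
+ *	int dev_fd = open("/dev/qti-smmu-proxy", O_RDWR);	// hypothetical path
+ *	if (ioctl(dev_fd, QTI_SMMU_PROXY_GET_DMA_BUF, &ctl) == 0)
+ *		use_dma_buf_fd(ctl.dma_buf_fd);			// hypothetical helper
+ */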
+static int receiver_probe_handler(struct device *dev)
+{
+	int ret = 0;
+
+	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
+	if (IS_ERR(msgq_hdl)) {
+		ret = PTR_ERR(msgq_hdl);
+		dev_err(dev, "Queue registration failed: %ld!\n", PTR_ERR(msgq_hdl));
+		return ret;
+	}
+
+	receiver_msgq_handler_thread = kthread_run(receiver_msgq_handler, msgq_hdl,
+						   "smmu_proxy_msgq_handler");
+	if (IS_ERR(receiver_msgq_handler_thread)) {
+		ret = PTR_ERR(receiver_msgq_handler_thread);
+		dev_err(dev, "Failed to launch receiver_msgq_handler thread: %ld\n",
+			PTR_ERR(receiver_msgq_handler_thread));
+		goto free_msgq;
+	}
+
+	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
+	if (ret) {
+		pr_err("Failed to create character device with error %d\n", ret);
+		goto free_kthread;
+	}
+
+	return 0;
+free_kthread:
+	kthread_stop(receiver_msgq_handler_thread);
+free_msgq:
+	gh_msgq_unregister(msgq_hdl);
+	return ret;
+}
+
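+/*
+ * IOMMU fault handler installed on every proxy-managed context bank. It logs
+ * the faulting IOVA and returns an error so the fault is treated as unhandled
+ * and reported by the SMMU driver rather than silently retried.
+ */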
+static int proxy_fault_handler(struct iommu_domain *domain, struct device *dev,
+			       unsigned long iova, int flags, void *token)
+{
+	dev_err(dev, "Context fault with IOVA %lx and fault flags %d\n", iova, flags);
+	return -EINVAL;
+}
+
+static int cb_probe_handler(struct device *dev)
+{
+	int ret;
+	unsigned int context_bank_id;
+	struct iommu_domain *domain;
+
+	ret = of_property_read_u32(dev->of_node, "qti,cb-id", &context_bank_id);
+	if (ret) {
+		dev_err(dev, "Failed to read qti,cb-id property for device\n");
+		return -EINVAL;
+	}
+
+	if (context_bank_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
+		dev_err(dev, "Invalid CB ID: %u\n", context_bank_id);
+		return -EINVAL;
+	}
+
+	if (cb_devices[context_bank_id]) {
+		dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
+		return -EINVAL;
+	}
+
+	ret = dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "Failed to set segment size\n");
+		return ret;
+	}
+
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(dev, "Failed to set DMA-MASK\n");
+		return ret;
+	}
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (IS_ERR_OR_NULL(domain)) {
+		dev_err(dev, "%s: Failed to get iommu domain\n", __func__);
+		return -EINVAL;
+	}
+
+	iommu_set_fault_handler(domain, proxy_fault_handler, NULL);
+	cb_devices[context_bank_id] = dev;
+
+	return 0;
+}
+
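+/*
+ * One platform driver backs two kinds of DT nodes: per-context-bank nodes
+ * (CB_COMPAT_STR) and the message-queue receiver node (RECEIVER_COMPAT_STR);
+ * probe dispatches on the compatible string.
+ */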
+static int smmu_proxy_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, CB_COMPAT_STR)) {
+		return cb_probe_handler(dev);
+	} else if (of_device_is_compatible(dev->of_node, RECEIVER_COMPAT_STR)) {
+		return receiver_probe_handler(dev);
+	} else {
+		return -EINVAL;
+	}
+}
+
+static const struct of_device_id smmu_proxy_match_table[] = {
+	{.compatible = RECEIVER_COMPAT_STR},
+	{.compatible = CB_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver smmu_proxy_driver = {
+	.probe = smmu_proxy_probe,
+	.driver = {
+		.name = "qti-smmu-proxy",
+		.of_match_table = smmu_proxy_match_table,
+	},
+};
+
+int __init init_smmu_proxy_driver(void)
+{
+	int ret;
+	struct csf_version csf_version;
+
+	ret = smmu_proxy_get_csf_version(&csf_version);
+	if (ret) {
+		pr_err("%s: Unable to get CSF version\n", __func__);
+		return ret;
+	}
+
+	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
+		pr_err("%s: CSF 2.5 not in use, not loading module\n", __func__);
+		return -EINVAL;
+	}
+
+	return platform_driver_register(&smmu_proxy_driver);
+}
+module_init(init_smmu_proxy_driver);
+
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_LICENSE("GPL v2");

+ 113 - 0
qcom/opensource/securemsm-kernel/ssg_kernel_headers.py

@@ -0,0 +1,113 @@
+# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published by
+# the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import filecmp
+import os
+import re
+import subprocess
+import sys
+
+def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
+    if not h.startswith(prefix):
+        print('error: expected prefix [%s] on header [%s]' % (prefix, h))
+        return False
+
+    # out_h joins the header's relative path (computed in gen_smcinvoke_headers()) onto the gen_dir out/soong/.temp/sbox/<temp hash value>/out/
+    # e.g. out/soong/.temp/sbox/<temp hash value>/out/linux/smcinvoke.h
+    # After the build completes, the exposed headers can be found under the following gen path:
+    # out/soong/.intermediates/.../qti_generate_smcinvoke_kernel_headers/gen/
+    if 'include/uapi' in h:
+        out_h = os.path.join(gen_dir,'include', h[len(prefix):])
+    else:
+        out_h = os.path.join(gen_dir, h[len(prefix):])
+    (out_h_dirname, out_h_basename) = os.path.split(out_h)
+    env = os.environ.copy()
+    env["LOC_UNIFDEF"] = unifdef
+    cmd = ["sh", headers_install, h, out_h]
+
+    if verbose:
+        print('run_headers_install: cmd is %s' % cmd)
+
+    result = subprocess.call(cmd, env=env)
+
+    if result != 0:
+        print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
+        return False
+    return True
+
+def gen_smcinvoke_headers(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose):
+    error_count = 0
+    # smcinvoke_headers_to_expose is a string list of individual paths to headers to expose
+    # They are passed using Android.bp variable substitution: $(locations <label>), e.g. $(locations linux/*.h)
+    # Note that <label> has to be a rule that locates the file; it cannot be the file itself.
+    for h in smcinvoke_headers_to_expose:
+        # h will be the relative path from the repo root directory securemsm-kernel ex. <parent directory structure>/securemsm-kernel/linux/smcinvoke.h
+        # So we need to split the string and keep the directory structure we want to expose i.e. just linux/smcinvoke.h
+        topDirectory = 'securemsm-kernel'
+        if 'include/uapi' in h:
+            directorySplitLocation = '/'+ topDirectory +'/'
+            smcinvoke_headers_to_expose_prefix = os.path.join(h.split(directorySplitLocation)[0], topDirectory, 'include', 'uapi') + os.sep
+            if not run_headers_install(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose_prefix, h):
+                error_count += 1
+        else:
+            directorySplitLocation = '/'+ topDirectory +'/'
+            smcinvoke_headers_to_expose_prefix = os.path.join(h.split(directorySplitLocation)[0], topDirectory) + os.sep
+                error_count += 1
+                    error_count += 1
+    return error_count
+
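+# Illustrative invocation (a sketch only; the paths below are hypothetical and
+# are normally filled in by the Android.bp genrule):
+#   python3 ssg_kernel_headers.py --verbose --header_arch arm64 \
+#       --gen_dir out/gen --headers_install kernel/scripts/headers_install.sh \
+#       --unifdef out/host/unifdef \
+#       --smcinvoke_headers_to_expose path/to/securemsm-kernel/linux/smcinvoke.h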
+def main():
+    """Parse command line arguments and perform top level control."""
+    parser = argparse.ArgumentParser(
+            description=__doc__,
+            formatter_class=argparse.RawDescriptionHelpFormatter)
+
+    # Arguments that apply to every invocation of this script.
+    parser.add_argument(
+            '--verbose', action='store_true',
+            help='Print output that describes the workings of this script.')
+    parser.add_argument(
+            '--header_arch', required=True,
+            help='The arch for which to generate headers.')
+    parser.add_argument(
+            '--gen_dir', required=True,
+            help='Where to place the generated files.')
+    parser.add_argument(
+            '--smcinvoke_headers_to_expose', required=True, nargs='*',
+            help='The list of smcinvoke header files.')
+    parser.add_argument(
+            '--headers_install', required=True,
+            help='The headers_install tool to process input headers.')
+    parser.add_argument(
+            '--unifdef',
+            required=True,
+            help='The unifdef tool used by headers_install.')
+
+    args = parser.parse_args()
+
+    if args.verbose:
+        print('header_arch [%s]' % args.header_arch)
+        print('gen_dir [%s]' % args.gen_dir)
+        print('smcinvoke_headers_to_expose [%s]' % args.smcinvoke_headers_to_expose)
+        print('headers_install [%s]' % args.headers_install)
+        print('unifdef [%s]' % args.unifdef)
+
+
+    return gen_smcinvoke_headers(args.verbose, args.gen_dir,
+            args.headers_install, args.unifdef, args.smcinvoke_headers_to_expose)
+
+if __name__ == '__main__':
+    sys.exit(main())

+ 1969 - 0
qcom/opensource/securemsm-kernel/tz_log/tz_log.c

@@ -0,0 +1,1969 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s:[%s][%d]: " fmt, KBUILD_MODNAME, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/dma-buf.h>
+#include <linux/version.h>
+#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
+#include <linux/firmware/qcom/qcom_scm.h>
+#else
+#include <linux/qcom_scm.h>
+#endif
+#include <linux/qtee_shmbridge.h>
+#include <linux/proc_fs.h>
+#if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
+#include <linux/tmelog.h>
+#endif
+
+#include "misc/qseecomi.h"
+
+/* QSEE_LOG_BUF_SIZE = 32K */
+#define QSEE_LOG_BUF_SIZE 0x8000
+
+/* enlarged qsee log buf size is 128K by default */
+#define QSEE_LOG_BUF_SIZE_V2 0x20000
+
+/* Tme log buffer size 20K */
+#define TME_LOG_BUF_SIZE 0x5000
+
+/* TZ Diagnostic Area legacy version number */
+#define TZBSP_DIAG_MAJOR_VERSION_LEGACY	2
+
+/* TZ Diagnostic Area version number */
+#define TZBSP_FVER_MAJOR_MINOR_MASK     0x3FF  /* 10 bits */
+#define TZBSP_FVER_MAJOR_SHIFT          22
+#define TZBSP_FVER_MINOR_SHIFT          12
+#define TZBSP_DIAG_MAJOR_VERSION_V9     9
+#define TZBSP_DIAG_MINOR_VERSION_V2     2
+#define TZBSP_DIAG_MINOR_VERSION_V21    3
+#define TZBSP_DIAG_MINOR_VERSION_V22    4
+
+/* TZ Diag Feature Version Id */
+#define QCOM_SCM_FEAT_DIAG_ID           0x06
+
+/*
+ * Preprocessor Definitions and Constants
+ */
+#define TZBSP_MAX_CPU_COUNT 0x08
+/*
+ * Number of VMID Tables
+ */
+#define TZBSP_DIAG_NUM_OF_VMID 16
+/*
+ * VMID Description length
+ */
+#define TZBSP_DIAG_VMID_DESC_LEN 7
+/*
+ * Number of Interrupts
+ */
+#define TZBSP_DIAG_INT_NUM  32
+/*
+ * Length of descriptive name associated with Interrupt
+ */
+#define TZBSP_MAX_INT_DESC 16
+/*
+ * TZ 3.X version info
+ */
+#define QSEE_VERSION_TZ_3_X 0x800000
+/*
+ * TZ 4.X version info
+ */
+#define QSEE_VERSION_TZ_4_X 0x1000000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
+#define ENCRYPTED_TZ_LOG_ID 0
+#define ENCRYPTED_QSEE_LOG_ID 1
+
+/*
+ * Directory for TZ DBG logs
+ */
+#define TZDBG_DIR_NAME "tzdbg"
+
+/*
+ * VMID Table
+ */
+struct tzdbg_vmid_t {
+	uint8_t vmid; /* Virtual Machine Identifier */
+	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN];	/* ASCII Text */
+};
+/*
+ * Boot Info Table
+ */
+struct tzdbg_boot_info_t {
+	uint32_t wb_entry_cnt;	/* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;	/* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;	/* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;	/* Power Collapse exit CPU counter */
+	uint32_t warm_jmp_addr;	/* Last Warmboot Jump Address */
+	uint32_t spare;	/* Reserved for future use. */
+};
+/*
+ * Boot Info Table for 64-bit
+ */
+struct tzdbg_boot_info64_t {
+	uint32_t wb_entry_cnt;  /* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;   /* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;  /* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;   /* Power Collapse exit CPU counter */
+	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
+	uint32_t psci_exit_cnt;   /* PSCI syscall exit CPU Counter */
+	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
+	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
+};
+/*
+ * Reset Info Table
+ */
+struct tzdbg_reset_info_t {
+	uint32_t reset_type;	/* Reset Reason */
+	uint32_t reset_cnt;	/* Number of resets occurred/CPU */
+};
+/*
+ * Interrupt Info Table
+ */
+struct tzdbg_int_t {
+	/*
+	 * Type of Interrupt/exception
+	 */
+	uint16_t int_info;
+	/*
+	 * Availability of the slot
+	 */
+	uint8_t avail;
+	/*
+	 * Reserved for future use
+	 */
+	uint8_t spare;
+	/*
+	 * Interrupt # for IRQ and FIQ
+	 */
+	uint32_t int_num;
+	/*
+	 * ASCII text describing type of interrupt e.g:
+	 * Secure Timer, EBI XPU. This string is always null terminated,
+	 * supporting at most TZBSP_MAX_INT_DESC characters.
+	 * Any additional characters are truncated.
+	 */
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
+};
+
+/*
+ * Interrupt Info Table used in tz version >=4.X
+ */
+struct tzdbg_int_t_tz40 {
+	uint16_t int_info;
+	uint8_t avail;
+	uint8_t spare;
+	uint32_t int_num;
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
+};
+
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+	/* Wake source info : APCS_GICC_HPPIR */
+	uint32_t HPPIR;
+	/* Wake source info : APCS_GICC_AHPPIR */
+	uint32_t AHPPIR;
+};
+
+/*
+ * Log ring buffer position
+ */
+struct tzdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+struct tzdbg_log_pos_v2_t {
+	uint32_t wrap;
+	uint32_t offset;
+};
+
+ /*
+  * Log ring buffer
+  */
+struct tzdbg_log_t {
+	struct tzdbg_log_pos_t	log_pos;
+	/* open ended array to the end of the 4K IMEM buffer */
+	uint8_t					log_buf[];
+};
+
+struct tzdbg_log_v2_t {
+	struct tzdbg_log_pos_v2_t	log_pos;
+	/* open ended array to the end of the 4K IMEM buffer */
+	uint8_t					log_buf[];
+};
+
+struct tzbsp_encr_info_for_log_chunk_t {
+	uint32_t size_to_encr;
+	uint8_t nonce[TZBSP_NONCE_LEN];
+	uint8_t tag[TZBSP_TAG_LEN];
+};
+
+/*
+ * Only `ENTIRE_LOG` will be used unless the
+ * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2.
+ * If this is true, the diag log will be encrypted in two
+ * separate chunks: a smaller chunk containing only error
+ * fatal logs and a bigger "rest of the log" chunk. In this
+ * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be
+ * used instead of `ENTIRE_LOG`.
+ */
+enum tzbsp_encr_info_for_log_chunks_idx_t {
+	BIG_LOG_CHUNK = 0,
+	ENTIRE_LOG = 1,
+	ERR_FATAL_LOG_CHUNK = 1,
+	MAX_NUM_OF_CHUNKS,
+};
+
+struct tzbsp_encr_info_t {
+	uint32_t num_of_chunks;
+	struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS];
+	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+};
+
+/*
+ * Diagnostic Table
+ * Note: This is the reference data structure for tz diagnostic table
+ * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
+ * copied into buffer from i/o memory.
+ */
+struct tzdbg_t {
+	uint32_t magic_num;
+	uint32_t version;
+	/*
+	 * Number of CPU's
+	 */
+	uint32_t cpu_count;
+	/*
+	 * Offset of VMID Table
+	 */
+	uint32_t vmid_info_off;
+	/*
+	 * Offset of Boot Table
+	 */
+	uint32_t boot_info_off;
+	/*
+	 * Offset of Reset info Table
+	 */
+	uint32_t reset_info_off;
+	/*
+	 * Offset of Interrupt info Table
+	 */
+	uint32_t int_info_off;
+	/*
+	 * Ring Buffer Offset
+	 */
+	uint32_t ring_off;
+	/*
+	 * Ring Buffer Length
+	 */
+	uint32_t ring_len;
+
+	/* Offset for Wakeup info */
+	uint32_t wakeup_info_off;
+
+	union {
+		/* The elements in below structure have to be used for TZ where
+		 * diag version = TZBSP_DIAG_MINOR_VERSION_V2
+		 */
+		struct {
+
+			/*
+			 * VMID to EE Mapping
+			 */
+			struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
+			/*
+			 * Boot Info
+			 */
+			struct tzdbg_boot_info_t  boot_info[TZBSP_MAX_CPU_COUNT];
+			/*
+			 * Reset Info
+			 */
+			struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
+			uint32_t num_interrupts;
+			struct tzdbg_int_t  int_info[TZBSP_DIAG_INT_NUM];
+			/* Wake up info */
+			struct tzbsp_diag_wakeup_info_t  wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+			uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+			uint8_t nonce[TZBSP_NONCE_LEN];
+
+			uint8_t tag[TZBSP_TAG_LEN];
+		};
+		/* The elements in below structure have to be used for TZ where
+		 * diag version = TZBSP_DIAG_MINOR_VERSION_V21
+		 */
+		struct {
+
+			uint32_t encr_info_for_log_off;
+
+			/*
+			 * VMID to EE Mapping
+			 */
+			struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID];
+			/*
+			 * Boot Info
+			 */
+			struct tzdbg_boot_info_t  boot_info_v2[TZBSP_MAX_CPU_COUNT];
+			/*
+			 * Reset Info
+			 */
+			struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT];
+			uint32_t num_interrupts_v2;
+			struct tzdbg_int_t  int_info_v2[TZBSP_DIAG_INT_NUM];
+
+			/* Wake up info */
+			struct tzbsp_diag_wakeup_info_t  wakeup_info_v2[TZBSP_MAX_CPU_COUNT];
+
+			struct tzbsp_encr_info_t encr_info_for_log;
+		};
+	};
+
+	/*
+	 * We need at least 2K for the ring buffer
+	 */
+	struct tzdbg_log_t ring_buffer;	/* TZ Ring Buffer */
+};
+
+struct hypdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+struct rmdbg_log_hdr_t {
+	uint32_t write_idx;
+	uint32_t size;
+};
+struct rmdbg_log_pos_t {
+	uint32_t read_idx;
+	uint32_t size;
+};
+struct hypdbg_boot_info_t {
+	uint32_t warm_entry_cnt;
+	uint32_t warm_exit_cnt;
+};
+
+struct hypdbg_t {
+	/* Magic Number */
+	uint32_t magic_num;
+
+	/* Number of CPU's */
+	uint32_t cpu_count;
+
+	/* Ring Buffer Offset */
+	uint32_t ring_off;
+
+	/* Ring buffer position mgmt */
+	struct hypdbg_log_pos_t log_pos;
+	uint32_t log_len;
+
+	/* S2 fault numbers */
+	uint32_t s2_fault_counter;
+
+	/* Boot Info */
+	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+
+	/* Ring buffer pointer */
+	uint8_t log_buf_p[];
+};
+
+struct tme_log_pos {
+	uint32_t offset;
+	size_t size;
+};
+
+/*
+ * Enumeration order for VMID's
+ */
+enum tzdbg_stats_type {
+	TZDBG_BOOT = 0,
+	TZDBG_RESET,
+	TZDBG_INTERRUPT,
+	TZDBG_VMID,
+	TZDBG_GENERAL,
+	TZDBG_LOG,
+	TZDBG_QSEE_LOG,
+	TZDBG_HYP_GENERAL,
+	TZDBG_HYP_LOG,
+	TZDBG_RM_LOG,
+	TZDBG_TME_LOG,
+	TZDBG_STATS_MAX
+};
+
+struct tzdbg_stat {
+	size_t display_len;
+	size_t display_offset;
+	char *name;
+	char *data;
+	bool avail;
+};
+
+struct tzdbg {
+	void __iomem *virt_iobase;
+	void __iomem *hyp_virt_iobase;
+	void __iomem *rmlog_virt_iobase;
+	void __iomem *tmelog_virt_iobase;
+	struct tzdbg_t *diag_buf;
+	struct hypdbg_t *hyp_diag_buf;
+	uint8_t *rm_diag_buf;
+	uint8_t *tme_buf;
+	char *disp_buf;
+	int debug_tz[TZDBG_STATS_MAX];
+	struct tzdbg_stat stat[TZDBG_STATS_MAX];
+	uint32_t hyp_debug_rw_buf_size;
+	uint32_t rmlog_rw_buf_size;
+	bool is_hyplog_enabled;
+	uint32_t tz_version;
+	bool is_encrypted_log_enabled;
+	bool is_enlarged_buf;
+	bool is_full_encrypted_tz_logs_supported;
+	bool is_full_encrypted_tz_logs_enabled;
+	int tz_diag_minor_version;
+	int tz_diag_major_version;
+};
+
+struct tzbsp_encr_log_t {
+	/* Magic Number */
+	uint32_t magic_num;
+	/* version NUMBER */
+	uint32_t version;
+	/* encrypted log size */
+	uint32_t encr_log_buff_size;
+	/* Wrap value*/
+	uint16_t wrap_count;
+	/* AES encryption key wrapped up with oem public key*/
+	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+	/* Nonce used for encryption*/
+	uint8_t nonce[TZBSP_NONCE_LEN];
+	/* Tag to be used for Validation */
+	uint8_t tag[TZBSP_TAG_LEN];
+	/* Encrypted log buffer */
+	uint8_t log_buf[1];
+};
+
+struct encrypted_log_info {
+	phys_addr_t paddr;
+	void *vaddr;
+	size_t size;
+	uint64_t shmb_handle;
+};
+
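+/*
+ * Each named stat below is exposed as a read-only node under /proc/tzdbg/
+ * (created in tzdbg_fs_init()), e.g. /proc/tzdbg/qsee_log.
+ */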
+static struct tzdbg tzdbg = {
+	.stat[TZDBG_BOOT].name = "boot",
+	.stat[TZDBG_RESET].name = "reset",
+	.stat[TZDBG_INTERRUPT].name = "interrupt",
+	.stat[TZDBG_VMID].name = "vmid",
+	.stat[TZDBG_GENERAL].name = "general",
+	.stat[TZDBG_LOG].name = "log",
+	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
+	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
+	.stat[TZDBG_HYP_LOG].name = "hyp_log",
+	.stat[TZDBG_RM_LOG].name = "rm_log",
+	.stat[TZDBG_TME_LOG].name = "tme_log",
+};
+
+static struct tzdbg_log_t *g_qsee_log;
+static struct tzdbg_log_v2_t *g_qsee_log_v2;
+static dma_addr_t coh_pmem;
+static uint32_t debug_rw_buf_size;
+static uint32_t display_buf_size;
+static uint32_t qseelog_buf_size;
+static phys_addr_t disp_buf_paddr;
+static uint32_t tmecrashdump_address_offset;
+
+static uint64_t qseelog_shmbridge_handle;
+static struct encrypted_log_info enc_qseelog_info;
+static struct encrypted_log_info enc_tzlog_info;
+
+/*
+ * Debugfs data structure and functions
+ */
+
+static int _disp_tz_general_stats(void)
+{
+	int len = 0;
+
+	len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
+			"   Version        : 0x%x\n"
+			"   Magic Number   : 0x%x\n"
+			"   Number of CPU  : %d\n",
+			tzdbg.diag_buf->version,
+			tzdbg.diag_buf->magic_num,
+			tzdbg.diag_buf->cpu_count);
+	tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_vmid_stats(void)
+{
+	int i, num_vmid;
+	int len = 0;
+	struct tzdbg_vmid_t *ptr;
+
+	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->vmid_info_off);
+	num_vmid = ((tzdbg.diag_buf->boot_info_off -
+				tzdbg.diag_buf->vmid_info_off)/
+					(sizeof(struct tzdbg_vmid_t)));
+
+	for (i = 0; i < num_vmid; i++) {
+		if (ptr->vmid < 0xFF) {
+			len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"   0x%x        %s\n",
+				(uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
+		}
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_boot_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_boot_info_t *ptr = NULL;
+	struct tzdbg_boot_info64_t *ptr_64 = NULL;
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	} else {
+		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	}
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+			len += scnprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address : 0x%llx\n"
+					"     Warmboot entry CPU counter : 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter : 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n"
+					"     Psci entry CPU counter : 0x%x\n"
+					"     Psci exit CPU counter : 0x%x\n"
+					"     Warmboot Jump Address Instruction : 0x%x\n",
+					i, (uint64_t)ptr_64->warm_jmp_addr,
+					ptr_64->wb_entry_cnt,
+					ptr_64->wb_exit_cnt,
+					ptr_64->pc_entry_cnt,
+					ptr_64->pc_exit_cnt,
+					ptr_64->psci_entry_cnt,
+					ptr_64->psci_exit_cnt,
+					ptr_64->warm_jmp_instr);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr_64++;
+		} else {
+			len += scnprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address     : 0x%x\n"
+					"     Warmboot entry CPU counter: 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter: 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n",
+					i, ptr->warm_jmp_addr,
+					ptr->wb_entry_cnt,
+					ptr->wb_exit_cnt,
+					ptr->pc_entry_cnt,
+					ptr->pc_exit_cnt);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr++;
+		}
+	}
+	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_reset_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_reset_info_t *ptr;
+
+	ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->reset_info_off);
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Reset Type (reason)       : 0x%x\n"
+				"     Reset counter             : 0x%x\n",
+				i, ptr->reset_type, ptr->reset_cnt);
+
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+
+		ptr++;
+	}
+	tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_interrupt_stats(void)
+{
+	int i, j;
+	int len = 0;
+	uint32_t *num_int;
+	void *ptr;
+	struct tzdbg_int_t *tzdbg_ptr;
+	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
+
+	num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
+			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
+	ptr = ((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->int_info_off);
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+
+	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+		tzdbg_ptr = ptr;
+		for (i = 0; i < (*num_int); i++) {
+			len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr->int_num,
+				(uint32_t)tzdbg_ptr->int_info,
+				(uint8_t *)tzdbg_ptr->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr->int_count[j]);
+			}
+			len += scnprintf(tzdbg.disp_buf + len,
+					debug_rw_buf_size - 1, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			tzdbg_ptr++;
+		}
+	} else {
+		tzdbg_ptr_tz40 = ptr;
+		for (i = 0; i < (*num_int); i++) {
+			len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr_tz40->int_num,
+				(uint32_t)tzdbg_ptr_tz40->int_info,
+				(uint8_t *)tzdbg_ptr_tz40->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += scnprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr_tz40->int_count[j]);
+			}
+			len += scnprintf(tzdbg.disp_buf + len,
+					debug_rw_buf_size - 1, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			tzdbg_ptr_tz40++;
+		}
+	}
+
+	tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats_legacy(void)
+{
+	int len = 0;
+	unsigned char *ptr;
+
+	ptr = (unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->ring_off;
+	len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
+							"%s\n", ptr);
+
+	tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
+	return len;
+}
+
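+/*
+ * Copy new data from a TZ ring buffer into tzdbg.disp_buf. @log_start is the
+ * reader's saved position; the writer's position is log->log_pos. Example of
+ * the wrap handling below: if the reader last saw wrap=3 and the writer is now
+ * at wrap=5, more than one full wrap has passed, so the saved start position
+ * is stale and is moved to just past the writer's current offset.
+ */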
+static int _disp_log_stats(struct tzdbg_log_t *log,
+			struct tzdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = log->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start)
+		wrap_cnt = wrap_end - wrap_start;
+	else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = log->log_pos.wrap - 1;
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(log->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	}
+
+	pr_debug("diag_buf wrap = %u, offset = %u\n",
+		log->log_pos.wrap, log->log_pos.offset);
+	while (log_start->offset == log->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		unsigned long t = msleep_interruptible(50);
+
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		if (buf_idx == TZDBG_LOG)
+			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	}
+
+	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+	pr_debug("diag_buf wrap = %u, offset = %u\n",
+		log->log_pos.wrap, log->log_pos.offset);
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log,
+			struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = log->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start)
+		wrap_cnt = wrap_end - wrap_start;
+	else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = log->log_pos.wrap - 1;
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(log->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	}
+	pr_debug("diag_buf wrap = %u, offset = %u\n",
+		log->log_pos.wrap, log->log_pos.offset);
+
+	while (log_start->offset == log->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		unsigned long t = msleep_interruptible(50);
+
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		if (buf_idx == TZDBG_LOG)
+			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	}
+
+	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+	pr_debug("diag_buf wrap = %u, offset = %u\n",
+		log->log_pos.wrap, log->log_pos.offset);
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int __disp_hyp_log_stats(uint8_t *log,
+			struct hypdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
+	unsigned long t = 0;
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = hyp->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start)
+		wrap_cnt = wrap_end - wrap_start;
+	else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = hyp->log_pos.wrap - 1;
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(hyp->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == hyp->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		t = msleep_interruptible(50);
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		/* TZDBG_HYP_LOG */
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+						tzdbg.hyp_debug_rw_buf_size);
+	}
+
+	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
+				tzdbg.hyp_debug_rw_buf_size : count;
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len)
+{
+	uint32_t i = 0;
+	/*
+	 *  Transfer data from the RM diag buffer to the display buffer in user space
+
+	 */
+	while ((i < max_len) && (i < display_buf_size)) {
+		tzdbg.disp_buf[i] = log_ptr[i];
+		i++;
+	}
+	if (i != max_len)
+		pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n",
+			i, display_buf_size);
+	tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf;
+	return i;
+}
+
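+/*
+ * Hex-dump @size bytes from @text_addr into @buf, 32 bytes per line, preceded
+ * by @intro_message; returns the number of characters written.
+ */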
+static int print_text(char *intro_message,
+			unsigned char *text_addr,
+			unsigned int size,
+			char *buf, uint32_t buf_len)
+{
+	unsigned int   i;
+	int len = 0;
+
+	pr_debug("begin address %p, size %d\n", text_addr, size);
+	len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message);
+	for (i = 0;  i < size;  i++) {
+		if (buf_len <= len + 6) {
+			pr_err("buffer not enough, buf_len %d, len %d\n",
+				buf_len, len);
+			return buf_len;
+		}
+		len += scnprintf(buf + len, buf_len - len, "%02hhx ",
+					text_addr[i]);
+		if ((i & 0x1f) == 0x1f)
+			len += scnprintf(buf + len, buf_len - len, "%c", '\n');
+	}
+	len += scnprintf(buf + len, buf_len - len, "%c", '\n');
+	return len;
+}
+
+static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info,
+				enum tzdbg_stats_type type, uint32_t log_id)
+{
+	int ret = 0, len = 0;
+	struct tzbsp_encr_log_t *encr_log_head;
+	uint32_t size = 0;
+
+	if ((!tzdbg.is_full_encrypted_tz_logs_supported) &&
+		(tzdbg.is_full_encrypted_tz_logs_enabled))
+		pr_info("TZ does not support full encrypted log functionality\n");
+	ret = qcom_scm_request_encrypted_log(enc_log_info->paddr,
+		enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported,
+		tzdbg.is_full_encrypted_tz_logs_enabled);
+	if (ret)
+		return 0;
+	encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr);
+	pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n",
+		display_buf_size, encr_log_head->encr_log_buff_size);
+	size = encr_log_head->encr_log_buff_size;
+
+	len += scnprintf(tzdbg.disp_buf + len,
+			(display_buf_size - 1) - len,
+			"\n-------- New Encrypted %s --------\n",
+			((log_id == ENCRYPTED_QSEE_LOG_ID) ?
+				"QSEE Log" : "TZ Dialog"));
+
+	len += scnprintf(tzdbg.disp_buf + len,
+			(display_buf_size - 1) - len,
+			"\nMagic_Num :\n0x%x\n"
+			"\nVersion :\n%d\n"
+			"\nEncr_Log_Buff_Size :\n%d\n"
+			"\nWrap_Count :\n%d\n",
+			encr_log_head->magic_num,
+			encr_log_head->version,
+			encr_log_head->encr_log_buff_size,
+			encr_log_head->wrap_count);
+
+	len += print_text("\nKey : ", encr_log_head->key,
+			TZBSP_AES_256_ENCRYPTED_KEY_SIZE,
+			tzdbg.disp_buf + len, display_buf_size - len);
+	len += print_text("\nNonce : ", encr_log_head->nonce,
+			TZBSP_NONCE_LEN,
+			tzdbg.disp_buf + len, display_buf_size - len);
+	len += print_text("\nTag : ", encr_log_head->tag,
+			TZBSP_TAG_LEN,
+			tzdbg.disp_buf + len, display_buf_size - len);
+
+	if (len > display_buf_size - size)
+		pr_warn("Cannot fit all info into the buffer\n");
+
+	pr_debug("encrypted log size %d, display buffer size %d, used len %d\n",
+			size, display_buf_size, len);
+
+	len += print_text("\nLog : ", encr_log_head->log_buf, size,
+				tzdbg.disp_buf + len, display_buf_size - len);
+	memset(enc_log_info->vaddr, 0, enc_log_info->size);
+	tzdbg.stat[type].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
+	static struct tzdbg_log_pos_t log_start = {0};
+	struct tzdbg_log_v2_t *log_v2_ptr;
+	struct tzdbg_log_t *log_ptr;
+
+	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
+			tzdbg.diag_buf->ring_off -
+			offsetof(struct tzdbg_log_t, log_buf));
+
+	log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf +
+			tzdbg.diag_buf->ring_off -
+			offsetof(struct tzdbg_log_v2_t, log_buf));
+
+	if (!tzdbg.is_enlarged_buf)
+		return _disp_log_stats(log_ptr, &log_start,
+				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+
+	return _disp_log_stats_v2(log_v2_ptr, &log_start_v2,
+			tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+}
+
+static int _disp_hyp_log_stats(size_t count)
+{
+	static struct hypdbg_log_pos_t log_start = {0};
+	uint8_t *log_ptr;
+	uint32_t log_len;
+
+	log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
+				tzdbg.hyp_diag_buf->ring_off);
+	log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off;
+
+	return __disp_hyp_log_stats(log_ptr, &log_start,
+			log_len, count, TZDBG_HYP_LOG);
+}
+
+static int _disp_rm_log_stats(size_t count)
+{
+	static struct rmdbg_log_pos_t log_start = { 0 };
+	struct rmdbg_log_hdr_t *p_log_hdr = NULL;
+	uint8_t *log_ptr = NULL;
+	uint32_t log_len = 0;
+	static bool wrap_around = { false };
+
+	/* Return 0 to close the display file, if there is nothing else to do */
+	if ((log_start.size == 0x0) && wrap_around) {
+		wrap_around = false;
+		return 0;
+	}
+	/* Copy RM log data to tzdbg diag buffer for the first time */
+	/* Initialize the tracking data structure */
+	if (tzdbg.rmlog_rw_buf_size != 0) {
+		if (!wrap_around) {
+			memcpy_fromio((void *)tzdbg.rm_diag_buf,
+					tzdbg.rmlog_virt_iobase,
+					tzdbg.rmlog_rw_buf_size);
+			/* get RM header info first */
+			p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf;
+			/* Update RM log buffer index tracker and its size */
+			log_start.read_idx = 0x0;
+			log_start.size = p_log_hdr->size;
+		}
+		/* Update RM log buffer starting ptr */
+		log_ptr =
+			(uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf +
+				 sizeof(struct rmdbg_log_hdr_t));
+	} else {
+	/* Return 0 to close the display file, if there is nothing else to do */
+		pr_err("There is no RM log to read, size is %d!\n",
+			tzdbg.rmlog_rw_buf_size);
+		return 0;
+	}
+	log_len = log_start.size;
+	log_ptr += log_start.read_idx;
+	/* Check if we exceed the max length provided by user space */
+	log_len = (count > log_len) ? log_len : count;
+	/* Update tracking data structure */
+	log_start.size -= log_len;
+	log_start.read_idx += log_len;
+
+	if (log_start.size)
+		wrap_around = true;
+	return __disp_rm_log_stats(log_ptr, log_len);
+}
+
+static int _disp_qsee_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+	static struct tzdbg_log_pos_v2_t log_start_v2 = {0};
+
+	if (!tzdbg.is_enlarged_buf)
+		return _disp_log_stats(g_qsee_log, &log_start,
+			QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
+			count, TZDBG_QSEE_LOG);
+
+	return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2,
+		QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t),
+		count, TZDBG_QSEE_LOG);
+}
+
+static int _disp_hyp_general_stats(size_t count)
+{
+	int len = 0;
+	int i;
+	struct hypdbg_boot_info_t *ptr = NULL;
+
+	len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
+			tzdbg.hyp_debug_rw_buf_size - 1,
+			"   Magic Number    : 0x%x\n"
+			"   CPU Count       : 0x%x\n"
+			"   S2 Fault Counter: 0x%x\n",
+			tzdbg.hyp_diag_buf->magic_num,
+			tzdbg.hyp_diag_buf->cpu_count,
+			tzdbg.hyp_diag_buf->s2_fault_counter);
+
+	ptr = tzdbg.hyp_diag_buf->boot_info;
+	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
+		len += scnprintf((unsigned char *)tzdbg.disp_buf + len,
+				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Warmboot entry CPU counter: 0x%x\n"
+				"     Warmboot exit CPU counter : 0x%x\n",
+				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
+
+		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
+	return len;
+}
+
+#if IS_ENABLED(CONFIG_MSM_TMECOM_QMP)
+static int _disp_tme_log_stats(size_t count)
+{
+	static struct tme_log_pos log_start = { 0 };
+	static bool wrap_around = { false };
+	uint32_t buf_size;
+	uint8_t *log_ptr = NULL;
+	uint32_t log_len = 0;
+	int ret = 0;
+
+	/* Return 0 to close the display file */
+	if ((log_start.size == 0x0) && wrap_around) {
+		wrap_around = false;
+		return 0;
+	}
+
+	/* Copy TME log data to tzdbg diag buffer for the first time */
+	if (!wrap_around) {
+		ret = tmelog_process_request(tmecrashdump_address_offset,
+					     TME_LOG_BUF_SIZE, &buf_size);
+		if (ret) {
+			pr_err("Read tme log failed, ret=%d, buf_size: %#x\n", ret, buf_size);
+			return 0;
+		}
+		log_start.offset = 0x0;
+		log_start.size = buf_size;
+	}
+
+	log_ptr = tzdbg.tmelog_virt_iobase;
+	log_len = log_start.size;
+	log_ptr += log_start.offset;
+
+	/* Check if we exceed the max length provided by user space */
+	log_len = min(min((uint32_t)count, log_len), display_buf_size);
+
+	log_start.size -= log_len;
+	log_start.offset += log_len;
+	pr_debug("log_len: %d, log_start.offset: %#x, log_start.size: %#x\n",
+			log_len, log_start.offset, log_start.size);
+
+	if (log_start.size)
+		wrap_around = true;
+
+	/* Copy TME log data to display buffer */
+	memcpy_fromio(tzdbg.disp_buf, log_ptr, log_len);
+
+	tzdbg.stat[TZDBG_TME_LOG].data = tzdbg.disp_buf;
+	return log_len;
+}
+#else
+static int _disp_tme_log_stats(size_t count)
+{
+	return 0;
+}
+#endif
+
+static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	int len = 0;
+
+	if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET ||
+		tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL ||
+		tz_id == TZDBG_VMID || tz_id == TZDBG_LOG)
+		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf,
+				tzdbg.hyp_virt_iobase,
+				tzdbg.hyp_debug_rw_buf_size);
+
+	switch (tz_id) {
+	case TZDBG_BOOT:
+		len = _disp_tz_boot_stats();
+		break;
+	case TZDBG_RESET:
+		len = _disp_tz_reset_stats();
+		break;
+	case TZDBG_INTERRUPT:
+		len = _disp_tz_interrupt_stats();
+		break;
+	case TZDBG_GENERAL:
+		len = _disp_tz_general_stats();
+		break;
+	case TZDBG_VMID:
+		len = _disp_tz_vmid_stats();
+		break;
+	case TZDBG_LOG:
+		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
+				(tzdbg.diag_buf->version >> 16)) {
+			len = _disp_tz_log_stats(count);
+			*offp = 0;
+		} else {
+			len = _disp_tz_log_stats_legacy();
+		}
+		break;
+	case TZDBG_QSEE_LOG:
+		len = _disp_qsee_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_HYP_GENERAL:
+		len = _disp_hyp_general_stats(count);
+		break;
+	case TZDBG_HYP_LOG:
+		len = _disp_hyp_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_RM_LOG:
+		len = _disp_rm_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_TME_LOG:
+		len = _disp_tme_log_stats(count);
+		*offp = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (len > count)
+		len = count;
+
+	return simple_read_from_buffer(buf, len, offp,
+				tzdbg.stat[tz_id].data, len);
+}
+
+static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	int len = 0, ret = 0;
+	struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]);
+
+	pr_debug("%s: tz_id = %d\n", __func__, tz_id);
+
+	if (tz_id >= TZDBG_STATS_MAX) {
+		pr_err("invalid encrypted log id %d\n", tz_id);
+		return ret;
+	}
+
+	if (!stat->display_len) {
+		if (tz_id == TZDBG_QSEE_LOG)
+			stat->display_len = _disp_encrpted_log_stats(
+					&enc_qseelog_info,
+					tz_id, ENCRYPTED_QSEE_LOG_ID);
+		else
+			stat->display_len = _disp_encrpted_log_stats(
+					&enc_tzlog_info,
+					tz_id, ENCRYPTED_TZ_LOG_ID);
+		stat->display_offset = 0;
+	}
+	len = stat->display_len;
+	if (len > count)
+		len = count;
+
+	*offp = 0;
+	ret = simple_read_from_buffer(buf, len, offp,
+				tzdbg.stat[tz_id].data + stat->display_offset,
+				count);
+	stat->display_offset += ret;
+	stat->display_len -= ret;
+	pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp));
+	pr_debug("display_len = %lu, offset = %lu\n",
+			stat->display_len, stat->display_offset);
+	return ret;
+}
+
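+/*
+ * Hypervisor, RM and TME logs are never encrypted, so they always take the
+ * unencrypted read path; everything else honors the encrypted-log setting
+ * queried from TZ at probe time.
+ */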
+static ssize_t tzdbg_fs_read(struct file *file, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	struct seq_file *seq = file->private_data;
+	int tz_id = TZDBG_STATS_MAX;
+
+	if (seq)
+		tz_id = *(int *)(seq->private);
+	else {
+		pr_err("%s: Seq data null unable to proceed\n", __func__);
+		return 0;
+	}
+
+	if (!tzdbg.is_encrypted_log_enabled ||
+	    (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)
+	    || tz_id == TZDBG_RM_LOG || tz_id == TZDBG_TME_LOG)
+		return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp);
+	else
+		return tzdbg_fs_read_encrypted(tz_id, buf, count, offp);
+}
+
+static int tzdbg_procfs_open(struct inode *inode, struct file *file)
+{
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6, 0, 0))
+	return single_open(file, NULL, PDE_DATA(inode));
+#else
+	return single_open(file, NULL, pde_data(inode));
+#endif
+
+}
+
+static int tzdbg_procfs_release(struct inode *inode, struct file *file)
+{
+	return single_release(inode, file);
+}
+
+struct proc_ops tzdbg_fops = {
+	.proc_flags   = PROC_ENTRY_PERMANENT,
+	.proc_read    = tzdbg_fs_read,
+	.proc_open    = tzdbg_procfs_open,
+	.proc_release = tzdbg_procfs_release,
+};
+
+static int tzdbg_init_tme_log(struct platform_device *pdev, void __iomem *virt_iobase)
+{
+	/*
+	 * Tme logs are dumped in tme log ddr region but that region is not
+	 * accessible to hlos. Instead, collect logs at tme crashdump ddr
+	 * region with tmecom interface and then display logs reading from
+	 * crashdump region.
+	 */
+	if (of_property_read_u32((&pdev->dev)->of_node, "tmecrashdump-address-offset",
+				&tmecrashdump_address_offset)) {
+		pr_err("Tme Crashdump address offset needs to be defined!\n");
+		return -EINVAL;
+	}
+
+	tzdbg.tmelog_virt_iobase =
+		devm_ioremap(&pdev->dev, tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
+	if (!tzdbg.tmelog_virt_iobase) {
+		pr_err("ERROR: Could not ioremap: start=%#x, len=%u\n",
+				tmecrashdump_address_offset, TME_LOG_BUF_SIZE);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Allocates the QSEE log buffer (DMA-coherent memory) and registers it with TZ
+ */
+static int tzdbg_register_qsee_log_buf(struct platform_device *pdev)
+{
+	int ret = 0;
+	void *buf = NULL;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+	uint32_t ns_vm_nums = 1;
+
+	if (tzdbg.is_enlarged_buf) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qseelog-buf-size-v2", &qseelog_buf_size)) {
+			pr_debug("Enlarged qseelog buf size isn't defined\n");
+			qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2;
+		}
+	} else {
+		qseelog_buf_size = QSEE_LOG_BUF_SIZE;
+	}
+	pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size);
+
+	buf = dma_alloc_coherent(&pdev->dev,
+			qseelog_buf_size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (!tzdbg.is_encrypted_log_enabled) {
+		ret = qtee_shmbridge_register(coh_pmem,
+			qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums,
+			PERM_READ | PERM_WRITE,
+			&qseelog_shmbridge_handle);
+		if (ret) {
+			pr_err("failed to create bridge for qsee_log buf\n");
+			goto exit_free_mem;
+		}
+	}
+
+	g_qsee_log = (struct tzdbg_log_t *)buf;
+	g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
+
+	g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf;
+	g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0;
+
+	ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size);
+	if (ret != QSEOS_RESULT_SUCCESS) {
+		pr_err(
+		"%s: scm_call to register log buf failed, resp result =%d\n",
+		__func__, ret);
+		goto exit_dereg_bridge;
+	}
+
+	return ret;
+
+exit_dereg_bridge:
+	if (!tzdbg.is_encrypted_log_enabled)
+		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
+exit_free_mem:
+	dma_free_coherent(&pdev->dev, qseelog_buf_size,
+			(void *)g_qsee_log, coh_pmem);
+	return ret;
+}
+
+static void tzdbg_free_qsee_log_buf(struct platform_device *pdev)
+{
+	if (!tzdbg.is_encrypted_log_enabled)
+		qtee_shmbridge_deregister(qseelog_shmbridge_handle);
+	dma_free_coherent(&pdev->dev, qseelog_buf_size,
+				(void *)g_qsee_log, coh_pmem);
+}
+
+static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev)
+{
+	int ret = 0;
+	uint32_t ns_vmids[] = {VMID_HLOS};
+	uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
+	uint32_t ns_vm_nums = 1;
+
+	if (!tzdbg.is_encrypted_log_enabled)
+		return 0;
+
+	/* max encrypted qsee log buf size (includes header, page aligned) */
+	enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE;
+
+	enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev,
+					enc_qseelog_info.size,
+					&enc_qseelog_info.paddr, GFP_KERNEL);
+	if (enc_qseelog_info.vaddr == NULL)
+		return -ENOMEM;
+
+	ret = qtee_shmbridge_register(enc_qseelog_info.paddr,
+			enc_qseelog_info.size, ns_vmids,
+			ns_vm_perms, ns_vm_nums,
+			PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle);
+	if (ret) {
+		pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret);
+		goto exit_free_qseelog;
+	}
+	pr_debug("Alloc memory for encr_qsee_log, size = %zu\n",
+			enc_qseelog_info.size);
+
+	enc_tzlog_info.size = debug_rw_buf_size;
+	enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev,
+					enc_tzlog_info.size,
+					&enc_tzlog_info.paddr, GFP_KERNEL);
+	if (enc_tzlog_info.vaddr == NULL)
+		goto exit_unreg_qseelog;
+
+	ret = qtee_shmbridge_register(enc_tzlog_info.paddr,
+			enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums,
+			PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle);
+	if (ret) {
+		pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret);
+		goto exit_free_tzlog;
+	}
+	pr_debug("Alloc memory for encr_tz_log, size %zu\n",
+		enc_tzlog_info.size);
+
+	return 0;
+
+exit_free_tzlog:
+	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
+			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
+exit_unreg_qseelog:
+	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
+exit_free_qseelog:
+	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
+			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
+	return -ENOMEM;
+}
+
+static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev)
+{
+	qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle);
+	dma_free_coherent(&pdev->dev, enc_tzlog_info.size,
+			enc_tzlog_info.vaddr, enc_tzlog_info.paddr);
+	qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle);
+	dma_free_coherent(&pdev->dev, enc_qseelog_info.size,
+			enc_qseelog_info.vaddr, enc_qseelog_info.paddr);
+}
+
+static int  tzdbg_fs_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+	struct proc_dir_entry *dent_dir;
+	struct proc_dir_entry *dent;
+
+	dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL);
+	if (dent_dir == NULL) {
+		dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TZDBG_STATS_MAX; i++) {
+		tzdbg.debug_tz[i] = i;
+		if (!tzdbg.stat[i].avail)
+			continue;
+
+		dent = proc_create_data(tzdbg.stat[i].name,
+				0444, dent_dir,
+				&tzdbg_fops, &tzdbg.debug_tz[i]);
+		if (dent == NULL) {
+			dev_err(&pdev->dev, "TZ proc_create_data failed\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	platform_set_drvdata(pdev, dent_dir);
+	return 0;
+err:
+	remove_proc_entry(TZDBG_DIR_NAME, NULL);
+
+	return rc;
+}
+
+static void tzdbg_fs_exit(struct platform_device *pdev)
+{
+	struct proc_dir_entry *dent_dir;
+
+	dent_dir = platform_get_drvdata(pdev);
+	if (dent_dir)
+		remove_proc_entry(TZDBG_DIR_NAME, NULL);
+}
+
+static int __update_hypdbg_base(struct platform_device *pdev,
+			void __iomem *virt_iobase)
+{
+	phys_addr_t hypdiag_phy_iobase;
+	uint32_t hyp_address_offset;
+	uint32_t hyp_size_offset;
+	struct hypdbg_t *hyp;
+	uint32_t *ptr = NULL;
+
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
+							&hyp_address_offset)) {
+		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
+							&hyp_size_offset)) {
+		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
+		return -EINVAL;
+	}
+
+	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
+	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
+					hyp_size_offset);
+
+	tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev,
+					hypdiag_phy_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+	if (!tzdbg.hyp_virt_iobase) {
+		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
+			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
+	hyp = tzdbg.hyp_diag_buf;
+	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
+	return 0;
+}
+
+static int __update_rmlog_base(struct platform_device *pdev,
+			       void __iomem *virt_iobase)
+{
+	uint32_t rmlog_address;
+	uint32_t rmlog_size;
+	uint32_t *ptr = NULL;
+
+	/* if we don't get the node just ignore it */
+	if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address",
+							&rmlog_address)) {
+		dev_err(&pdev->dev, "RM log address is not defined\n");
+		tzdbg.rmlog_rw_buf_size = 0;
+		return 0;
+	}
+	/* if we don't get the node just ignore it */
+	if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size",
+							&rmlog_size)) {
+		dev_err(&pdev->dev, "RM log size is not defined\n");
+		tzdbg.rmlog_rw_buf_size = 0;
+		return 0;
+	}
+
+	tzdbg.rmlog_rw_buf_size = rmlog_size;
+
+	/* Check if there is RM log to read */
+	if (!tzdbg.rmlog_rw_buf_size) {
+		tzdbg.rmlog_virt_iobase = NULL;
+		tzdbg.rm_diag_buf = NULL;
+		dev_err(&pdev->dev, "RM log size is %d\n",
+			tzdbg.rmlog_rw_buf_size);
+		return 0;
+	}
+
+	tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev,
+					rmlog_address,
+					rmlog_size);
+	if (!tzdbg.rmlog_virt_iobase) {
+		dev_err(&pdev->dev, "ERROR could not ioremap: start=%u, len=%u\n",
+			rmlog_address, tzdbg.rmlog_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	tzdbg.rm_diag_buf = (uint8_t *)ptr;
+	return 0;
+}
+static int tzdbg_get_tz_version(void)
+{
+	u64 version;
+	int ret = 0;
+
+	ret = qcom_scm_get_tz_log_feat_id(&version);
+
+	if (ret) {
+		pr_err("%s: scm_call to get tz version failed\n",
+				__func__);
+		return ret;
+	}
+	tzdbg.tz_version = version;
+
+	ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version);
+	if (ret) {
+		pr_err("%s: scm_call to get tz diag version failed, ret = %d\n",
+				__func__, ret);
+		return ret;
+	}
+	pr_warn("tz diag version is %llu\n", version);
+	tzdbg.tz_diag_major_version =
+		((version >> TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
+	tzdbg.tz_diag_minor_version =
+		((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK);
+	if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) {
+		switch (tzdbg.tz_diag_minor_version) {
+		case TZBSP_DIAG_MINOR_VERSION_V2:
+		case TZBSP_DIAG_MINOR_VERSION_V21:
+		case TZBSP_DIAG_MINOR_VERSION_V22:
+			tzdbg.is_enlarged_buf = true;
+			break;
+		default:
+			tzdbg.is_enlarged_buf = false;
+		}
+	} else {
+		tzdbg.is_enlarged_buf = false;
+	}
+	return ret;
+}
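+
+/*
+ * Worked example for the decode above, assuming the common layout of the
+ * feature word (TZBSP_FVER_MAJOR_SHIFT = 22, TZBSP_FVER_MINOR_SHIFT = 12,
+ * TZBSP_FVER_MAJOR_MINOR_MASK = 0x3FF; the authoritative values are the
+ * macros defined earlier in this file): a diag feature word of 0x02402000
+ * decodes to major 9, minor 2.  If the V9/V2 macros above map to those
+ * values, that falls into the first switch case and selects the enlarged
+ * diag buffer.
+ */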
+
+static void tzdbg_query_encrypted_log(void)
+{
+	int ret = 0;
+	uint64_t enabled;
+
+	ret = qcom_scm_query_encrypted_log_feature(&enabled);
+	if (ret) {
+		if (ret == -EIO)
+			pr_info("SCM_CALL: SYS CALL NOT SUPPORTED IN TZ\n");
+		else
+			pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret);
+		tzdbg.is_encrypted_log_enabled = false;
+	} else {
+		pr_warn("encrypted qseelog enabled is %llu\n", enabled);
+		tzdbg.is_encrypted_log_enabled = enabled;
+	}
+}
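+
+/*
+ * Note on the error handling above: -EIO from the query means this TZ build
+ * does not implement the syscall at all, so it is treated the same as
+ * "feature disabled" and probe falls back to mapping the plain, unencrypted
+ * diag region.
+ */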
+
+/*
+ * Driver functions
+ */
+static int tz_log_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	void __iomem *virt_iobase;
+	phys_addr_t tzdiag_phy_iobase;
+	uint32_t *ptr = NULL;
+	int ret = 0, i;
+
+	/*
+	 * By default, a proc node is created for every stat; entries that
+	 * should be skipped have avail cleared selectively below.
+	 */
+	for (i = 0; i < TZDBG_STATS_MAX; i++)
+		tzdbg.stat[i].avail = true;
+
+	ret = tzdbg_get_tz_version();
+	if (ret)
+		return ret;
+
+	/*
+	 * Get the address that stores the physical location of the diagnostic data
+	 */
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		dev_err(&pdev->dev,
+				"%s: ERROR Missing MEM resource\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * Get the debug buffer size
+	 */
+	debug_rw_buf_size = resource_size(resource);
+
+	/*
+	 * Map the address that stores the physical location of the diagnostic data
+	 */
+	virt_iobase = devm_ioremap(&pdev->dev, resource->start,
+				debug_rw_buf_size);
+	if (!virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pa, len=%u\n",
+			__func__, &resource->start,
+			(unsigned int)(debug_rw_buf_size));
+		return -ENXIO;
+	}
+
+	if (pdev->dev.of_node) {
+		tzdbg.is_hyplog_enabled = of_property_read_bool(
+			(&pdev->dev)->of_node, "qcom,hyplog-enabled");
+		if (tzdbg.is_hyplog_enabled) {
+			ret = __update_hypdbg_base(pdev, virt_iobase);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: failed to get hypdbg_base, ret %d\n",
+					__func__, ret);
+				return -EINVAL;
+			}
+			ret = __update_rmlog_base(pdev, virt_iobase);
+			if (ret) {
+				dev_err(&pdev->dev,
+					"%s: failed to get rmlog_base, ret %d\n",
+					__func__, ret);
+				return -EINVAL;
+			}
+		} else {
+			tzdbg.stat[TZDBG_HYP_LOG].avail = false;
+			tzdbg.stat[TZDBG_HYP_GENERAL].avail = false;
+			tzdbg.stat[TZDBG_RM_LOG].avail = false;
+			dev_info(&pdev->dev, "Hyp log service not supported\n");
+		}
+	} else {
+		dev_dbg(&pdev->dev, "Device tree data not found\n");
+	}
+
+	/*
+	 * Retrieve the address of diagnostic data
+	 */
+	tzdiag_phy_iobase = readl_relaxed(virt_iobase);
+
+	tzdbg_query_encrypted_log();
+	/*
+	 * Map the diagnostic information area if encryption is disabled
+	 */
+	if (!tzdbg.is_encrypted_log_enabled) {
+		tzdbg.virt_iobase = devm_ioremap(&pdev->dev,
+				tzdiag_phy_iobase, debug_rw_buf_size);
+
+		if (!tzdbg.virt_iobase) {
+			dev_err(&pdev->dev,
+				"%s: could not ioremap: start=%pa, len=%u\n",
+				__func__, &tzdiag_phy_iobase,
+				debug_rw_buf_size);
+			return -ENXIO;
+		}
+		/* allocate diag_buf */
+		ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
+		if (ptr == NULL)
+			return -ENOMEM;
+		tzdbg.diag_buf = (struct tzdbg_t *)ptr;
+	} else {
+		if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) &&
+			(tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22))
+			tzdbg.is_full_encrypted_tz_logs_supported = true;
+		if (pdev->dev.of_node) {
+			tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool(
+				(&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled");
+		}
+	}
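+	/*
+	 * With encrypted logs enabled, the shared diag region is not mapped or
+	 * copied here; probe relies on tzdbg_allocate_encrypted_log_buf() below
+	 * instead, and on newer TZ diag versions (major V9, minor >= V22) the
+	 * fully-encrypted TZ log path may additionally be enabled via the DT
+	 * property read just above.
+	 */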
+
+	/* Init for TME log */
+	ret = tzdbg_init_tme_log(pdev, virt_iobase);
+	if (ret < 0) {
+		tzdbg.stat[TZDBG_TME_LOG].avail = false;
+		pr_warn("TME log initialization failed!\n");
+	}
+
+	/* register unencrypted qsee log buffer */
+	ret = tzdbg_register_qsee_log_buf(pdev);
+	if (ret)
+		goto exit_free_diag_buf;
+
+	/* allocate encrypted qsee and tz log buffer */
+	ret = tzdbg_allocate_encrypted_log_buf(pdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: Failed to allocate encrypted log buffer\n",
+			__func__);
+		goto exit_free_qsee_log_buf;
+	}
+
+	/* allocate display_buf */
+	if (UINT_MAX/4 < qseelog_buf_size) {
+		pr_err("display_buf_size integer overflow\n");
+		goto exit_free_qsee_log_buf;
+	}
+	display_buf_size = qseelog_buf_size * 4;
+	tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size,
+		&disp_buf_paddr, GFP_KERNEL);
+	if (tzdbg.disp_buf == NULL) {
+		ret = -ENOMEM;
+		goto exit_free_encr_log_buf;
+	}
+
+	if (tzdbg_fs_init(pdev))
+		goto exit_free_disp_buf;
+	return 0;
+
+exit_free_disp_buf:
+	dma_free_coherent(&pdev->dev, display_buf_size,
+			(void *)tzdbg.disp_buf, disp_buf_paddr);
+exit_free_encr_log_buf:
+	tzdbg_free_encrypted_log_buf(pdev);
+exit_free_qsee_log_buf:
+	tzdbg_free_qsee_log_buf(pdev);
+exit_free_diag_buf:
+	if (!tzdbg.is_encrypted_log_enabled)
+		kfree(tzdbg.diag_buf);
+	return -ENXIO;
+}
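+
+/*
+ * Note on the unwind path above: the exit_* labels free resources in reverse
+ * order of allocation, and every failure after tzdbg_get_tz_version()
+ * ultimately returns -ENXIO from probe.
+ */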
+
+static int tz_log_remove(struct platform_device *pdev)
+{
+	tzdbg_fs_exit(pdev);
+	dma_free_coherent(&pdev->dev, display_buf_size,
+			(void *)tzdbg.disp_buf, disp_buf_paddr);
+	tzdbg_free_encrypted_log_buf(pdev);
+	tzdbg_free_qsee_log_buf(pdev);
+	if (!tzdbg.is_encrypted_log_enabled)
+		kfree(tzdbg.diag_buf);
+	return 0;
+}
+
+static const struct of_device_id tzlog_match[] = {
+	{.compatible = "qcom,tz-log"},
+	{}
+};
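+
+/*
+ * Illustrative device-tree node matched by this driver; the property names
+ * come from the probe/update helpers above, but the unit address, reg range
+ * and offsets below are placeholders, not values for any real target:
+ *
+ *	tz-log@146aa720 {
+ *		compatible = "qcom,tz-log";
+ *		reg = <0x146aa720 0x3000>;
+ *		qcom,hyplog-enabled;
+ *		hyplog-address-offset = <0x410>;
+ *		hyplog-size-offset = <0x414>;
+ *		rmlog-address = <0x14680000>;
+ *		rmlog-size = <0x2000>;
+ *	};
+ *
+ * "qcom,full-encrypted-tz-logs-enabled" may also be set when encrypted TZ
+ * logs are in use (see tz_log_probe()).
+ */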
+
+static struct platform_driver tz_log_driver = {
+	.probe		= tz_log_probe,
+	.remove		= tz_log_remove,
+	.driver		= {
+		.name = "tz_log",
+		.of_match_table = tzlog_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+module_platform_driver(tz_log_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TZ Log driver");
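+
+/*
+ * Runtime usage sketch (informational; the exact entry names come from the
+ * tzdbg stat table defined earlier in this file): once probed, each available
+ * stat is exposed as a read-only proc file under the TZDBG_DIR_NAME
+ * directory, and reading an entry formats the corresponding diag buffer into
+ * tzdbg.disp_buf, which tz_log_probe() sizes at four times the raw qsee log.
+ */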