From 61bb176ca21a92b1b77630af5bffbf7749551655 Mon Sep 17 00:00:00 2001 From: Git User Date: Thu, 4 Nov 2021 14:40:51 -0700 Subject: [PATCH 001/202] Initial empty repository From e190b865569b26e71cfc7ef47e9c9cd021b4fc3d Mon Sep 17 00:00:00 2001 From: Smita Ghosh Date: Sat, 6 Nov 2021 17:21:49 -0700 Subject: [PATCH 002/202] secuemsm-kernel : Enable DLKM's from vendor SSG's kernel modules will be loaded through userspace init instead of kernel. Change-Id: Ibf88a6a56df65a933d451d39136060967595e5b7 --- Android.bp | 5 + Android.mk | 65 + Kbuild | 17 + Makefile | 13 + arch/arm64/boot/dts/Makefile | 5 + arch/arm64/boot/dts/securemsm-kernel.dtsi | 72 + config/ssg_smcinvoke.conf | 7 + crypto-qti/compat_qcedev.c | 535 ++ crypto-qti/compat_qcedev.h | 202 + crypto-qti/linux/fips_status.h | 38 + .../linux/platform_data/qcom_crypto_device.h | 18 + crypto-qti/linux/qcedev.h | 289 + crypto-qti/linux/qcota.h | 215 + crypto-qti/linux/qcrypto.h | 60 + crypto-qti/ota_crypto.c | 994 +++ crypto-qti/qce.h | 196 + crypto-qti/qce50.c | 6198 +++++++++++++++++ crypto-qti/qce50.h | 239 + crypto-qti/qce_ota.h | 22 + crypto-qti/qcedev.c | 2330 +++++++ crypto-qti/qcedev_smmu.c | 440 ++ crypto-qti/qcedev_smmu.h | 82 + crypto-qti/qcedevi.h | 126 + crypto-qti/qcrypto.c | 5495 +++++++++++++++ crypto-qti/qcryptohw_50.h | 521 ++ linux/platform_data/qcom_crypto_device.h | 18 + linux/qcedev.h | 289 + linux/qcrypto.h | 60 + linux/smcinvoke.h | 95 + securemsm_kernel_product_board.mk | 8 + securemsm_kernel_vendor_board.mk | 5 + smcinvoke/IClientEnv.h | 91 + smcinvoke/IQSEEComCompat.h | 71 + smcinvoke/IQSEEComCompatAppLoader.h | 99 + smcinvoke/misc/qseecom_kernel.h | 48 + smcinvoke/smcinvoke.c | 2449 +++++++ smcinvoke/smcinvoke.h | 103 + smcinvoke/smcinvoke_kernel.c | 479 ++ smcinvoke/smcinvoke_object.h | 195 + smcinvoke/trace_smcinvoke.h | 498 ++ ssg_kernel_headers.py | 96 + tz_log/tz_log.c | 1689 +++++ 42 files changed, 24477 insertions(+) create mode 100644 Android.bp create mode 100644 Android.mk create mode 100644 Kbuild create mode 100644 Makefile create mode 100644 arch/arm64/boot/dts/Makefile create mode 100644 arch/arm64/boot/dts/securemsm-kernel.dtsi create mode 100644 config/ssg_smcinvoke.conf create mode 100644 crypto-qti/compat_qcedev.c create mode 100644 crypto-qti/compat_qcedev.h create mode 100644 crypto-qti/linux/fips_status.h create mode 100644 crypto-qti/linux/platform_data/qcom_crypto_device.h create mode 100644 crypto-qti/linux/qcedev.h create mode 100644 crypto-qti/linux/qcota.h create mode 100644 crypto-qti/linux/qcrypto.h create mode 100644 crypto-qti/ota_crypto.c create mode 100644 crypto-qti/qce.h create mode 100644 crypto-qti/qce50.c create mode 100644 crypto-qti/qce50.h create mode 100644 crypto-qti/qce_ota.h create mode 100644 crypto-qti/qcedev.c create mode 100644 crypto-qti/qcedev_smmu.c create mode 100644 crypto-qti/qcedev_smmu.h create mode 100644 crypto-qti/qcedevi.h create mode 100644 crypto-qti/qcrypto.c create mode 100644 crypto-qti/qcryptohw_50.h create mode 100644 linux/platform_data/qcom_crypto_device.h create mode 100644 linux/qcedev.h create mode 100644 linux/qcrypto.h create mode 100644 linux/smcinvoke.h create mode 100644 securemsm_kernel_product_board.mk create mode 100644 securemsm_kernel_vendor_board.mk create mode 100644 smcinvoke/IClientEnv.h create mode 100644 smcinvoke/IQSEEComCompat.h create mode 100644 smcinvoke/IQSEEComCompatAppLoader.h create mode 100644 smcinvoke/misc/qseecom_kernel.h create mode 100644 smcinvoke/smcinvoke.c create mode 100644 smcinvoke/smcinvoke.h create mode 
100644 smcinvoke/smcinvoke_kernel.c create mode 100644 smcinvoke/smcinvoke_object.h create mode 100644 smcinvoke/trace_smcinvoke.h create mode 100644 ssg_kernel_headers.py create mode 100644 tz_log/tz_log.c diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000000..3912ac5210 --- /dev/null +++ b/Android.bp @@ -0,0 +1,5 @@ +cc_library_headers { + name: "smcinvoke_kernel_headers", + vendor_available: true, + export_include_dirs: ["."], +} diff --git a/Android.mk b/Android.mk new file mode 100644 index 0000000000..d95cf8a5d1 --- /dev/null +++ b/Android.mk @@ -0,0 +1,65 @@ +# Android makefile for audio kernel modules + +LOCAL_PATH := $(call my-dir) +DLKM_DIR := $(TOP)/device/qcom/common/dlkm + + + + +SSG_SRC_FILES := \ + $(wildcard $(LOCAL_PATH)/*) \ + $(wildcard $(LOCAL_PATH)/*/*) \ + $(wildcard $(LOCAL_PATH)/*/*/*) \ + $(wildcard $(LOCAL_PATH)/*/*/*/*) + + +#$(error $(SSG_SRC_FILES)) +include $(CLEAR_VARS) +#LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := smcinvoke_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := smcinvoke_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################## +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := tz_log_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := tz_log_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# +################################################## +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := qce50_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := qce50_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# +################################################## +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := qcedev-mod_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := qcedev-mod_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# +################################################## +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := qcrypto-msm_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := qcrypto-msm_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# diff --git a/Kbuild b/Kbuild new file mode 100644 index 0000000000..bf3e85214d --- /dev/null +++ b/Kbuild @@ -0,0 +1,17 @@ +include $(SSG_MODULE_ROOT)/config/ssg_smcinvoke.conf + +obj-m += smcinvoke_dlkm.o +smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o + +obj-m += tz_log_dlkm.o +tz_log_dlkm-objs := tz_log/tz_log.o + +obj-m += qce50_dlkm.o +qce50_dlkm-objs := crypto-qti/qce50.o + +obj-m += qcedev-mod_dlkm.o +qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o + +obj-m += qcrypto-msm_dlkm.o +qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o crypto-qti/des.o + diff --git a/Makefile b/Makefile 
new file mode 100644 index 0000000000..3cdc7da653 --- /dev/null +++ b/Makefile @@ -0,0 +1,13 @@ +M=$(PWD) +SSG_MODULE_ROOT=$(KERNEL_SRC)/$(M) + +KBUILD_OPTIONS+= SSG_MODULE_ROOT=$(SSG_MODULE_ROOT) + +all: modules + +clean: + $(MAKE) -C $(KERNEL_SRC) M=$(M) clean + +%: + $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS) + diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile new file mode 100644 index 0000000000..42c3b62880 --- /dev/null +++ b/arch/arm64/boot/dts/Makefile @@ -0,0 +1,5 @@ +dtbo-y +=securemsm-kernel.dtbo + +always-y := $(dtb-y) $(dtbo-y) +subdir-y := $(dts-dirs) +clean-files := *.dtb *.dtbo diff --git a/arch/arm64/boot/dts/securemsm-kernel.dtsi b/arch/arm64/boot/dts/securemsm-kernel.dtsi new file mode 100644 index 0000000000..ef3696abbf --- /dev/null +++ b/arch/arm64/boot/dts/securemsm-kernel.dtsi @@ -0,0 +1,72 @@ +/dts-v1/; +/plugin/; + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +&reserved_memory { + + user_contig_mem: user_contig_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; + reusable; + alignment = <0x0 0x400000>; + size = <0x0 0x1000000>; + }; + qseecom_mem: qseecom_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; + reusable; + alignment = <0x0 0x400000>; + size = <0x0 0x1400000>; + }; + + qseecom_ta_mem: qseecom_ta_region { + compatible = "shared-dma-pool"; + alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; + reusable; + alignment = <0x0 0x400000>; + size = <0x0 0x1000000>; + }; +}; +&firmware { + qcom_smcinvoke { + compatible = "qcom,smcinvoke"; + }; + + qcom_tzlog: tz-log@146AA720 { + + compatible = "qcom,tz-log"; + reg = <0x146AA720 0x3000>; + qcom,hyplog-enabled; + hyplog-address-offset = <0x410>; + hyplog-size-offset = <0x414>; + }; + + qcom,dma-heaps { + qcom,qseecom { + qcom,dma-heap-name = "qcom,qseecom"; + qcom,dma-heap-type = ; + memory-region = <&qseecom_mem>; + }; + + qcom,qseecom_ta { + qcom,dma-heap-name = "qcom,qseecom-ta"; + qcom,dma-heap-type = ; + memory-region = <&qseecom_ta_mem>; + }; + }; +}; + + + + diff --git a/config/ssg_smcinvoke.conf b/config/ssg_smcinvoke.conf new file mode 100644 index 0000000000..ae62b67f20 --- /dev/null +++ b/config/ssg_smcinvoke.conf @@ -0,0 +1,7 @@ +export CONFIG_QCOM_SMCINVOKE=m +export CONFIG_QTI_TZ_LOG=m +export CONFIG_CRYPTO_DEV_QCEDEV=m +export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m +export CONFIG_CRYPTO_DEV_QCRYPTO=m +export CONFIG_SCSI_UFS_CRYPTO=m +export CONFIG_SCSI_UFS_CRYPTO_QTI=m diff --git a/crypto-qti/compat_qcedev.c b/crypto-qti/compat_qcedev.c new file mode 100644 index 0000000000..27066b4a6c --- /dev/null +++ b/crypto-qti/compat_qcedev.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI CE 32-bit compatibility syscall for 64-bit systems + * + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include "linux/qcedev.h" +#include +#include "compat_qcedev.h" + +static void *compat_alloc_user_space(int size){ + return NULL; +} + +static int compat_get_qcedev_pmem_info( + struct compat_qcedev_pmem_info __user *pmem32, + struct qcedev_pmem_info __user *pmem) +{ + compat_ulong_t offset; + compat_int_t fd_src; + compat_int_t fd_dst; + int err, i; + uint32_t len; + + err = get_user(fd_src, &pmem32->fd_src); + err |= put_user(fd_src, &pmem->fd_src); + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(offset, &pmem32->src[i].offset); + err |= put_user(offset, &pmem->src[i].offset); + err |= get_user(len, &pmem32->src[i].len); + err |= put_user(len, &pmem->src[i].len); + } + + err |= get_user(fd_dst, &pmem32->fd_dst); + err |= put_user(fd_dst, &pmem->fd_dst); + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(offset, &pmem32->dst[i].offset); + err |= put_user(offset, &pmem->dst[i].offset); + err |= get_user(len, &pmem32->dst[i].len); + err |= put_user(len, &pmem->dst[i].len); + } + + return err; +} + +static int compat_put_qcedev_pmem_info( + struct compat_qcedev_pmem_info __user *pmem32, + struct qcedev_pmem_info __user *pmem) +{ + compat_ulong_t offset; + compat_int_t fd_src; + compat_int_t fd_dst; + int err, i; + uint32_t len; + + err = get_user(fd_src, &pmem->fd_src); + err |= put_user(fd_src, &pmem32->fd_src); + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(offset, &pmem->src[i].offset); + err |= put_user(offset, &pmem32->src[i].offset); + err |= get_user(len, &pmem->src[i].len); + err |= put_user(len, &pmem32->src[i].len); + } + + err |= get_user(fd_dst, &pmem->fd_dst); + err |= put_user(fd_dst, &pmem32->fd_dst); + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(offset, &pmem->dst[i].offset); + err |= put_user(offset, &pmem32->dst[i].offset); + err |= get_user(len, &pmem->dst[i].len); + err |= put_user(len, &pmem32->dst[i].len); + } + + return err; +} + +static int compat_get_qcedev_vbuf_info( + struct compat_qcedev_vbuf_info __user *vbuf32, + struct qcedev_vbuf_info __user *vbuf) +{ + compat_uptr_t vaddr; + int err = 0, i; + uint32_t len; + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, &vbuf32->src[i].vaddr); + err |= put_user(vaddr, + (compat_uptr_t __user *)&vbuf->src[i].vaddr); + err |= get_user(len, &vbuf32->src[i].len); + err |= put_user(len, &vbuf->src[i].len); + } + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, &vbuf32->dst[i].vaddr); + err |= put_user(vaddr, + (compat_uptr_t __user *)&vbuf->dst[i].vaddr); + err |= get_user(len, &vbuf32->dst[i].len); + err |= put_user(len, &vbuf->dst[i].len); + } + return err; +} + +static int compat_put_qcedev_vbuf_info( + struct compat_qcedev_vbuf_info __user *vbuf32, + struct qcedev_vbuf_info __user *vbuf) +{ + compat_uptr_t vaddr; + int err = 0, i; + uint32_t len; + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, + (compat_uptr_t __user *)&vbuf->src[i].vaddr); + err |= put_user(vaddr, &vbuf32->src[i].vaddr); + err |= get_user(len, &vbuf->src[i].len); + err |= put_user(len, &vbuf32->src[i].len); + } + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, + (compat_uptr_t __user *)&vbuf->dst[i].vaddr); + err |= put_user(vaddr, &vbuf32->dst[i].vaddr); + err |= get_user(len, &vbuf->dst[i].len); + err |= put_user(len, &vbuf32->dst[i].len); + } + return err; +} + +static int compat_get_qcedev_cipher_op_req( + struct compat_qcedev_cipher_op_req __user *data32, + 
struct qcedev_cipher_op_req __user *data) +{ + enum qcedev_cipher_mode_enum mode; + enum qcedev_cipher_alg_enum alg; + compat_ulong_t byteoffset; + enum qcedev_oper_enum op; + compat_ulong_t data_len; + compat_ulong_t encklen; + compat_ulong_t entries; + compat_ulong_t ivlen; + uint8_t in_place_op; + int err, i; + uint8_t use_pmem; + uint8_t enckey; + uint8_t iv; + + err = get_user(use_pmem, &data32->use_pmem); + err |= put_user(use_pmem, &data->use_pmem); + + if (use_pmem) + err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem); + else + err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf); + + err |= get_user(entries, &data32->entries); + err |= put_user(entries, &data->entries); + err |= get_user(data_len, &data32->data_len); + err |= put_user(data_len, &data->data_len); + err |= get_user(in_place_op, &data32->in_place_op); + err |= put_user(in_place_op, &data->in_place_op); + + for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) { + err |= get_user(enckey, &(data32->enckey[i])); + err |= put_user(enckey, &(data->enckey[i])); + } + + err |= get_user(encklen, &data32->encklen); + err |= put_user(encklen, &data->encklen); + + for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) { + err |= get_user(iv, &(data32->iv[i])); + err |= put_user(iv, &(data->iv[i])); + } + + err |= get_user(ivlen, &data32->ivlen); + err |= put_user(ivlen, &data->ivlen); + err |= get_user(byteoffset, &data32->byteoffset); + err |= put_user(byteoffset, &data->byteoffset); + err |= get_user(alg, &data32->alg); + err |= put_user(alg, &data->alg); + err |= get_user(mode, &data32->mode); + err |= put_user(mode, &data->mode); + err |= get_user(op, &data32->op); + err |= put_user(op, &data->op); + + return err; +} + +static int compat_put_qcedev_cipher_op_req( + struct compat_qcedev_cipher_op_req __user *data32, + struct qcedev_cipher_op_req __user *data) +{ + enum qcedev_cipher_mode_enum mode; + enum qcedev_cipher_alg_enum alg; + compat_ulong_t byteoffset; + enum qcedev_oper_enum op; + compat_ulong_t data_len; + compat_ulong_t encklen; + compat_ulong_t entries; + compat_ulong_t ivlen; + uint8_t in_place_op; + int err, i; + uint8_t use_pmem; + uint8_t enckey; + uint8_t iv; + + err = get_user(use_pmem, &data->use_pmem); + err |= put_user(use_pmem, &data32->use_pmem); + + if (use_pmem) + err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem); + else + err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf); + + err |= get_user(entries, &data->entries); + err |= put_user(entries, &data32->entries); + err |= get_user(data_len, &data->data_len); + err |= put_user(data_len, &data32->data_len); + err |= get_user(in_place_op, &data->in_place_op); + err |= put_user(in_place_op, &data32->in_place_op); + + for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) { + err |= get_user(enckey, &(data->enckey[i])); + err |= put_user(enckey, &(data32->enckey[i])); + } + + err |= get_user(encklen, &data->encklen); + err |= put_user(encklen, &data32->encklen); + + for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) { + err |= get_user(iv, &(data->iv[i])); + err |= put_user(iv, &(data32->iv[i])); + } + + err |= get_user(ivlen, &data->ivlen); + err |= put_user(ivlen, &data32->ivlen); + err |= get_user(byteoffset, &data->byteoffset); + err |= put_user(byteoffset, &data32->byteoffset); + err |= get_user(alg, &data->alg); + err |= put_user(alg, &data32->alg); + err |= get_user(mode, &data->mode); + err |= put_user(mode, &data32->mode); + err |= get_user(op, &data->op); + err |= put_user(op, &data32->op); + + return err; +} + +static int 
compat_xfer_qcedev_map_buf_req( + struct compat_qcedev_map_buf_req __user *data32, + struct qcedev_map_buf_req __user *data, bool to_get) +{ + int rc = 0, i, fd = -1; + uint32_t fd_size, fd_offset, num_fds, buf_vaddr; + + if (to_get) { + /* copy from compat struct */ + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + rc |= get_user(fd, &data32->fd[i]); + rc |= put_user(fd, &data->fd[i]); + rc |= get_user(fd_size, &data32->fd_size[i]); + rc |= put_user(fd_size, &data->fd_size[i]); + rc |= get_user(fd_offset, &data32->fd_offset[i]); + rc |= put_user(fd_offset, &data->fd_offset[i]); + rc |= get_user(buf_vaddr, &data32->buf_vaddr[i]); + rc |= put_user(buf_vaddr, &data->buf_vaddr[i]); + } + + rc |= get_user(num_fds, &data32->num_fds); + rc |= put_user(num_fds, &data->num_fds); + } else { + /* copy to compat struct */ + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + rc |= get_user(fd, &data->fd[i]); + rc |= put_user(fd, &data32->fd[i]); + rc |= get_user(fd_size, &data->fd_size[i]); + rc |= put_user(fd_size, &data32->fd_size[i]); + rc |= get_user(fd_offset, &data->fd_offset[i]); + rc |= put_user(fd_offset, &data32->fd_offset[i]); + rc |= get_user(buf_vaddr, &data->buf_vaddr[i]); + rc |= put_user(buf_vaddr, &data32->buf_vaddr[i]); + } + rc |= get_user(num_fds, &data->num_fds); + rc |= put_user(num_fds, &data32->num_fds); + } + + return rc; +} + +static int compat_xfer_qcedev_unmap_buf_req( + struct compat_qcedev_unmap_buf_req __user *data32, + struct qcedev_unmap_buf_req __user *data, bool to_get) +{ + int i, rc = 0, fd = -1; + uint32_t num_fds; + + if (to_get) { + /* copy from compat struct */ + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + rc |= get_user(fd, &data32->fd[i]); + rc |= put_user(fd, &data->fd[i]); + } + rc |= get_user(num_fds, &data32->num_fds); + rc |= put_user(num_fds, &data->num_fds); + } else { + /* copy to compat struct */ + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + rc |= get_user(fd, &data->fd[i]); + rc |= put_user(fd, &data32->fd[i]); + } + rc |= get_user(num_fds, &data->num_fds); + rc |= put_user(num_fds, &data32->num_fds); + } + return rc; +} + + +static int compat_get_qcedev_sha_op_req( + struct compat_qcedev_sha_op_req __user *data32, + struct qcedev_sha_op_req __user *data) +{ + enum qcedev_sha_alg_enum alg; + compat_ulong_t authklen; + compat_ulong_t data_len; + compat_ulong_t entries; + compat_ulong_t diglen; + compat_uptr_t authkey; + compat_uptr_t vaddr; + int err = 0, i; + uint8_t digest; + uint32_t len; + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, &data32->data[i].vaddr); + err |= put_user(vaddr, + (compat_uptr_t __user *)&data->data[i].vaddr); + err |= get_user(len, &data32->data[i].len); + err |= put_user(len, &data->data[i].len); + } + + err |= get_user(entries, &data32->entries); + err |= put_user(entries, &data->entries); + err |= get_user(data_len, &data32->data_len); + err |= put_user(data_len, &data->data_len); + + for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) { + err |= get_user(digest, &(data32->digest[i])); + err |= put_user(digest, &(data->digest[i])); + } + + err |= get_user(diglen, &data32->diglen); + err |= put_user(diglen, &data->diglen); + err |= get_user(authkey, &data32->authkey); + err |= put_user(authkey, (compat_uptr_t __user *)&data->authkey); + err |= get_user(authklen, &data32->authklen); + err |= put_user(authklen, &data->authklen); + err |= get_user(alg, &data32->alg); + err |= put_user(alg, &data->alg); + + return err; +} + +static int compat_put_qcedev_sha_op_req( + struct compat_qcedev_sha_op_req __user *data32, + 
struct qcedev_sha_op_req __user *data) +{ + enum qcedev_sha_alg_enum alg; + compat_ulong_t authklen; + compat_ulong_t data_len; + compat_ulong_t entries; + compat_ulong_t diglen; + compat_uptr_t authkey; + compat_uptr_t vaddr; + int err = 0, i; + uint8_t digest; + uint32_t len; + + for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) { + err |= get_user(vaddr, + (compat_uptr_t __user *)&data->data[i].vaddr); + err |= put_user(vaddr, &data32->data[i].vaddr); + err |= get_user(len, &data->data[i].len); + err |= put_user(len, &data32->data[i].len); + } + + err |= get_user(entries, &data->entries); + err |= put_user(entries, &data32->entries); + err |= get_user(data_len, &data->data_len); + err |= put_user(data_len, &data32->data_len); + + for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) { + err |= get_user(digest, &(data->digest[i])); + err |= put_user(digest, &(data32->digest[i])); + } + + err |= get_user(diglen, &data->diglen); + err |= put_user(diglen, &data32->diglen); + err |= get_user(authkey, + (compat_uptr_t __user *)&data->authkey); + err |= put_user(authkey, &data32->authkey); + err |= get_user(authklen, &data->authklen); + err |= put_user(authklen, &data32->authklen); + err |= get_user(alg, &data->alg); + err |= put_user(alg, &data32->alg); + + return err; +} + +static unsigned int convert_cmd(unsigned int cmd) +{ + switch (cmd) { + case COMPAT_QCEDEV_IOCTL_ENC_REQ: + return QCEDEV_IOCTL_ENC_REQ; + case COMPAT_QCEDEV_IOCTL_DEC_REQ: + return QCEDEV_IOCTL_DEC_REQ; + case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ: + return QCEDEV_IOCTL_SHA_INIT_REQ; + case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ: + return QCEDEV_IOCTL_SHA_UPDATE_REQ; + case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ: + return QCEDEV_IOCTL_SHA_FINAL_REQ; + case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: + return QCEDEV_IOCTL_GET_SHA_REQ; + case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ: + return QCEDEV_IOCTL_GET_CMAC_REQ; + case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ: + return QCEDEV_IOCTL_MAP_BUF_REQ; + case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ: + return QCEDEV_IOCTL_UNMAP_BUF_REQ; + default: + return cmd; + } + +} + +long compat_qcedev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long ret; + + switch (cmd) { + case COMPAT_QCEDEV_IOCTL_ENC_REQ: + case COMPAT_QCEDEV_IOCTL_DEC_REQ: { + struct compat_qcedev_cipher_op_req __user *data32; + struct qcedev_cipher_op_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (!data) + return -EFAULT; + + err = compat_get_qcedev_cipher_op_req(data32, data); + if (err) + return err; + + ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data); + err = compat_put_qcedev_cipher_op_req(data32, data); + return ret ? ret : err; + } + case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ: + case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ: + case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ: + case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ: + case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: { + struct compat_qcedev_sha_op_req __user *data32; + struct qcedev_sha_op_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (!data) + return -EFAULT; + + err = compat_get_qcedev_sha_op_req(data32, data); + if (err) + return err; + + ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data); + err = compat_put_qcedev_sha_op_req(data32, data); + return ret ? 
ret : err; + } + case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ: { + struct compat_qcedev_map_buf_req __user *data32; + struct qcedev_map_buf_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (!data) + return -EINVAL; + + err = compat_xfer_qcedev_map_buf_req(data32, data, true); + if (err) + return err; + + ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data); + err = compat_xfer_qcedev_map_buf_req(data32, data, false); + return ret ? ret : err; + + break; + } + case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ: { + struct compat_qcedev_unmap_buf_req __user *data32; + struct qcedev_unmap_buf_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (!data) + return -EINVAL; + + err = compat_xfer_qcedev_unmap_buf_req(data32, data, true); + if (err) + return err; + + ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data); + err = compat_xfer_qcedev_unmap_buf_req(data32, data, false); + return ret ? ret : err; + + break; + } + default: + return -ENOIOCTLCMD; + } + return 0; +} diff --git a/crypto-qti/compat_qcedev.h b/crypto-qti/compat_qcedev.h new file mode 100644 index 0000000000..9d1cab8e39 --- /dev/null +++ b/crypto-qti/compat_qcedev.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014,2017-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _UAPI_COMPAT_QCEDEV__H +#define _UAPI_COMPAT_QCEDEV__H + +#include +#include + +#if IS_ENABLED(CONFIG_COMPAT) +#include + +/** + * struct compat_buf_info - Buffer information + * @offset: Offset from the base address of the buffer + * (Used when buffer is allocated using PMEM) + * @vaddr: Virtual buffer address pointer + * @len: Size of the buffer + */ +struct compat_buf_info { + union { + compat_ulong_t offset; + compat_uptr_t vaddr; + }; + compat_ulong_t len; +}; + +/** + * struct compat_qcedev_vbuf_info - Source and destination Buffer information + * @src: Array of buf_info for input/source + * @dst: Array of buf_info for output/destination + */ +struct compat_qcedev_vbuf_info { + struct compat_buf_info src[QCEDEV_MAX_BUFFERS]; + struct compat_buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct compat_qcedev_pmem_info - Stores PMEM buffer information + * @fd_src: Handle to /dev/adsp_pmem used to allocate + * memory for input/src buffer + * @src: Array of buf_info for input/source + * @fd_dst: Handle to /dev/adsp_pmem used to allocate + * memory for output/dst buffer + * @dst: Array of buf_info for output/destination + * @pmem_src_offset: The offset from input/src buffer + * (allocated by PMEM) + */ +struct compat_qcedev_pmem_info { + compat_int_t fd_src; + struct compat_buf_info src[QCEDEV_MAX_BUFFERS]; + compat_int_t fd_dst; + struct compat_buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct compat_qcedev_cipher_op_req - Holds the ciphering request information + * @use_pmem (IN): Flag to indicate if buffer source is PMEM + * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM + * @pmem (IN): Stores PMEM buffer information. + * Refer struct qcedev_pmem_info + * @vbuf (IN/OUT): Stores Source and destination Buffer information + * Refer to struct qcedev_vbuf_info + * @data_len (IN): Total Length of input/src and output/dst in bytes + * @in_place_op (IN): Indicates whether the operation is inplace where + * source == destination + * When using PMEM allocated memory, must set this to 1 + * @enckey (IN): 128 bits of confidentiality key + * enckey[0] bit 127-120, enckey[1] bit 119-112,.. 
+ * enckey[15] bit 7-0 + * @encklen (IN): Length of the encryption key(set to 128 bits/16 + * bytes in the driver) + * @iv (IN/OUT): Initialization vector data + * This is updated by the driver, incremented by + * number of blocks encrypted/decrypted. + * @ivlen (IN): Length of the IV + * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set + * for AES-128 CTR mode only) + * @alg (IN): Type of ciphering algorithm: AES/DES/3DES + * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR + * Applicable when using AES algorithm only + * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or + * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY + * + * If use_pmem is set to 0, the driver assumes that memory was not allocated + * via PMEM, and kernel will need to allocate memory and copy data from user + * space buffer (data_src/dta_dst) and process accordingly and copy data back + * to the user space buffer + * + * If use_pmem is set to 1, the driver assumes that memory was allocated via + * PMEM. + * The kernel driver will use the fd_src to determine the kernel virtual address + * base that maps to the user space virtual address base for the buffer + * allocated in user space. + * The final input/src and output/dst buffer pointer will be determined + * by adding the offsets to the kernel virtual addr. + * + * If use of hardware key is supported in the target, user can configure the + * key parameters (encklen, enckey) to use the hardware key. + * In order to use the hardware key, set encklen to 0 and set the enckey + * data array to 0. + */ +struct compat_qcedev_cipher_op_req { + uint8_t use_pmem; + union { + struct compat_qcedev_pmem_info pmem; + struct compat_qcedev_vbuf_info vbuf; + }; + compat_ulong_t entries; + compat_ulong_t data_len; + uint8_t in_place_op; + uint8_t enckey[QCEDEV_MAX_KEY_SIZE]; + compat_ulong_t encklen; + uint8_t iv[QCEDEV_MAX_IV_SIZE]; + compat_ulong_t ivlen; + compat_ulong_t byteoffset; + enum qcedev_cipher_alg_enum alg; + enum qcedev_cipher_mode_enum mode; + enum qcedev_oper_enum op; +}; + +/** + * struct qcedev_sha_op_req - Holds the hashing request information + * @data (IN): Array of pointers to the data to be hashed + * @entries (IN): Number of buf_info entries in the data array + * @data_len (IN): Length of data to be hashed + * @digest (IN/OUT): Returns the hashed data information + * @diglen (OUT): Size of the hashed/digest data + * @authkey (IN): Pointer to authentication key for HMAC + * @authklen (IN): Size of the authentication key + * @alg (IN): Secure Hash algorithm + */ +struct compat_qcedev_sha_op_req { + struct compat_buf_info data[QCEDEV_MAX_BUFFERS]; + compat_ulong_t entries; + compat_ulong_t data_len; + uint8_t digest[QCEDEV_MAX_SHA_DIGEST]; + compat_ulong_t diglen; + compat_uptr_t authkey; + compat_ulong_t authklen; + enum qcedev_sha_alg_enum alg; +}; + +/** + * struct compact_qcedev_map_buf_req - Holds the mapping request information + * fd (IN): Array of fds. + * num_fds (IN): Number of fds in fd[]. + * fd_size (IN): Array of sizes corresponding to each fd in fd[]. + * fd_offset (IN): Array of offset corresponding to each fd in fd[]. + * vaddr (OUT): Array of mapped virtual address corresponding to + * each fd in fd[]. 
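As a rough illustration of the buffer-mapping request documented above (a sketch, not part of this patch): a 64-bit userspace client would normally use the native qcedev_map_buf_req defined later in linux/qcedev.h rather than this compat variant. The include path, the already-open qcedev fd, and the origin of the dma-buf fd are assumptions.

/*
 * Hedged example: map one dma-buf fd so the driver returns a buf_vaddr
 * usable in subsequent cipher/hash requests. qce_fd is an already-open
 * handle to the qcedev node; dmabuf_fd is assumed to come from a
 * DMA-BUF heap allocation made elsewhere.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>          /* assumed install path of this header */

static int qcedev_map_one(int qce_fd, int dmabuf_fd, __u32 size, __u64 *vaddr)
{
        struct qcedev_map_buf_req req;

        memset(&req, 0, sizeof(req));
        req.num_fds = 1;
        req.fd[0] = dmabuf_fd;
        req.fd_size[0] = size;
        req.fd_offset[0] = 0;

        if (ioctl(qce_fd, QCEDEV_IOCTL_MAP_BUF_REQ, &req) < 0)
                return -1;

        *vaddr = req.buf_vaddr[0]; /* OUT: mapped address for later requests */
        return 0;
}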
+ */ +struct compat_qcedev_map_buf_req { + compat_long_t fd[QCEDEV_MAX_BUFFERS]; + compat_ulong_t num_fds; + compat_ulong_t fd_size[QCEDEV_MAX_BUFFERS]; + compat_ulong_t fd_offset[QCEDEV_MAX_BUFFERS]; + compat_u64 buf_vaddr[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct compat_qcedev_unmap_buf_req - Holds the hashing request information + * fd (IN): Array of fds to unmap + * num_fds (IN): Number of fds in fd[]. + */ +struct compat_qcedev_unmap_buf_req { + compat_long_t fd[QCEDEV_MAX_BUFFERS]; + compat_ulong_t num_fds; +}; + +struct file; +long qcedev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +long compat_qcedev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +#define COMPAT_QCEDEV_IOCTL_ENC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req) +#define COMPAT_QCEDEV_IOCTL_DEC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req) +#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req) +#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req) +#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req) +#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req) +#define COMPAT_QCEDEV_IOCTL_LOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 7) +#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 8) +#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req) +#define COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 10, struct compat_qcedev_map_buf_req) +#define COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 11, struct compat_qcedev_unmap_buf_req) +#endif /* CONFIG_COMPAT */ +#endif /* _UAPI_COMPAT_QCEDEV__H */ diff --git a/crypto-qti/linux/fips_status.h b/crypto-qti/linux/fips_status.h new file mode 100644 index 0000000000..559a229d6b --- /dev/null +++ b/crypto-qti/linux/fips_status.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _FIPS_STATUS__H +#define _FIPS_STATUS__H + +#include +#include + +/** + * fips_status: global FIPS140-2 status + * @FIPS140_STATUS_NA: + * Not a FIPS140-2 compliant Build. + * The flag status won't + * change throughout + * the lifetime + * @FIPS140_STATUS_PASS_CRYPTO: + * KAT self tests are passed. + * @FIPS140_STATUS_QCRYPTO_ALLOWED: + * Integrity test is passed. + * @FIPS140_STATUS_PASS: + * All tests are passed and build + * is in FIPS140-2 mode + * @FIPS140_STATUS_FAIL: + * One of the test is failed. + * This will block all requests + * to crypto modules + */ +enum fips_status { + FIPS140_STATUS_NA = 0, + FIPS140_STATUS_PASS_CRYPTO = 1, + FIPS140_STATUS_QCRYPTO_ALLOWED = 2, + FIPS140_STATUS_PASS = 3, + FIPS140_STATUS_FAIL = 0xFF +}; +#endif /* _FIPS_STATUS__H */ diff --git a/crypto-qti/linux/platform_data/qcom_crypto_device.h b/crypto-qti/linux/platform_data/qcom_crypto_device.h new file mode 100644 index 0000000000..819df7c5e5 --- /dev/null +++ b/crypto-qti/linux/platform_data/qcom_crypto_device.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __QCOM_CRYPTO_DEVICE__H +#define __QCOM_CRYPTO_DEVICE__H + +#include + +struct msm_ce_hw_support { + uint32_t ce_shared; + uint32_t shared_ce_resource; + uint32_t hw_key_support; + uint32_t sha_hmac; +}; + +#endif /* __QCOM_CRYPTO_DEVICE__H */ diff --git a/crypto-qti/linux/qcedev.h b/crypto-qti/linux/qcedev.h new file mode 100644 index 0000000000..6968e92c4b --- /dev/null +++ b/crypto-qti/linux/qcedev.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _QCEDEV__H +#define _QCEDEV__H + +#include +#include +#include "fips_status.h" + +#define QCEDEV_MAX_SHA_BLOCK_SIZE 64 +#define QCEDEV_MAX_BEARER 31 +#define QCEDEV_MAX_KEY_SIZE 64 +#define QCEDEV_MAX_IV_SIZE 32 + +#define QCEDEV_MAX_BUFFERS 16 +#define QCEDEV_MAX_SHA_DIGEST 32 + +#define QCEDEV_USE_PMEM 1 +#define QCEDEV_NO_PMEM 0 + +#define QCEDEV_AES_KEY_128 16 +#define QCEDEV_AES_KEY_192 24 +#define QCEDEV_AES_KEY_256 32 +/** + *qcedev_oper_enum: Operation types + * @QCEDEV_OPER_ENC: Encrypt + * @QCEDEV_OPER_DEC: Decrypt + * @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by + * user. Key already set by an external processor. + * @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by + * user. Key already set by an external processor. + */ +enum qcedev_oper_enum { + QCEDEV_OPER_DEC = 0, + QCEDEV_OPER_ENC = 1, + QCEDEV_OPER_DEC_NO_KEY = 2, + QCEDEV_OPER_ENC_NO_KEY = 3, + QCEDEV_OPER_LAST +}; + +/** + *qcedev_oper_enum: Cipher algorithm types + * @QCEDEV_ALG_DES: DES + * @QCEDEV_ALG_3DES: 3DES + * @QCEDEV_ALG_AES: AES + */ +enum qcedev_cipher_alg_enum { + QCEDEV_ALG_DES = 0, + QCEDEV_ALG_3DES = 1, + QCEDEV_ALG_AES = 2, + QCEDEV_ALG_LAST +}; + +/** + *qcedev_cipher_mode_enum : AES mode + * @QCEDEV_AES_MODE_CBC: CBC + * @QCEDEV_AES_MODE_ECB: ECB + * @QCEDEV_AES_MODE_CTR: CTR + * @QCEDEV_AES_MODE_XTS: XTS + * @QCEDEV_AES_MODE_CCM: CCM + * @QCEDEV_DES_MODE_CBC: CBC + * @QCEDEV_DES_MODE_ECB: ECB + */ +enum qcedev_cipher_mode_enum { + QCEDEV_AES_MODE_CBC = 0, + QCEDEV_AES_MODE_ECB = 1, + QCEDEV_AES_MODE_CTR = 2, + QCEDEV_AES_MODE_XTS = 3, + QCEDEV_AES_MODE_CCM = 4, + QCEDEV_DES_MODE_CBC = 5, + QCEDEV_DES_MODE_ECB = 6, + QCEDEV_AES_DES_MODE_LAST +}; + +/** + *enum qcedev_sha_alg_enum : Secure Hashing Algorithm + * @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits) + * @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit) + * @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits) + * @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit) + * @QCEDEV_ALG_AES_CMAC: Configurable MAC size + */ +enum qcedev_sha_alg_enum { + QCEDEV_ALG_SHA1 = 0, + QCEDEV_ALG_SHA256 = 1, + QCEDEV_ALG_SHA1_HMAC = 2, + QCEDEV_ALG_SHA256_HMAC = 3, + QCEDEV_ALG_AES_CMAC = 4, + QCEDEV_ALG_SHA_ALG_LAST +}; + +/** + * struct buf_info - Buffer information + * @offset: Offset from the base address of the buffer + * (Used when buffer is allocated using PMEM) + * @vaddr: Virtual buffer address pointer + * @len: Size of the buffer + */ +struct buf_info { + union { + __u32 offset; + __u8 *vaddr; + }; + __u32 len; +}; + +/** + * struct qcedev_vbuf_info - Source and destination Buffer information + * @src: Array of buf_info for input/source + * @dst: Array of buf_info for output/destination + */ +struct qcedev_vbuf_info { + struct buf_info src[QCEDEV_MAX_BUFFERS]; + struct buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_pmem_info - Stores PMEM buffer information + * @fd_src: Handle to 
/dev/adsp_pmem used to allocate + * memory for input/src buffer + * @src: Array of buf_info for input/source + * @fd_dst: Handle to /dev/adsp_pmem used to allocate + * memory for output/dst buffer + * @dst: Array of buf_info for output/destination + * @pmem_src_offset: The offset from input/src buffer + * (allocated by PMEM) + */ +struct qcedev_pmem_info { + int fd_src; + struct buf_info src[QCEDEV_MAX_BUFFERS]; + int fd_dst; + struct buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_cipher_op_req - Holds the ciphering request information + * @use_pmem (IN): Flag to indicate if buffer source is PMEM + * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM + * @pmem (IN): Stores PMEM buffer information. + * Refer struct qcedev_pmem_info + * @vbuf (IN/OUT): Stores Source and destination Buffer information + * Refer to struct qcedev_vbuf_info + * @data_len (IN): Total Length of input/src and output/dst in bytes + * @in_place_op (IN): Indicates whether the operation is inplace where + * source == destination + * When using PMEM allocated memory, must set this to 1 + * @enckey (IN): 128 bits of confidentiality key + * enckey[0] bit 127-120, enckey[1] bit 119-112,.. + * enckey[15] bit 7-0 + * @encklen (IN): Length of the encryption key(set to 128 bits/16 + * bytes in the driver) + * @iv (IN/OUT): Initialisation vector data + * This is updated by the driver, incremented by + * number of blocks encrypted/decrypted. + * @ivlen (IN): Length of the IV + * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set + * for AES-128 CTR mode only) + * @alg (IN): Type of ciphering algorithm: AES/DES/3DES + * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR + * Apllicabel when using AES algorithm only + * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or + * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY + * + *If use_pmem is set to 0, the driver assumes that memory was not allocated + * via PMEM, and kernel will need to allocate memory and copy data from user + * space buffer (data_src/dta_dst) and process accordingly and copy data back + * to the user space buffer + * + * If use_pmem is set to 1, the driver assumes that memory was allocated via + * PMEM. + * The kernel driver will use the fd_src to determine the kernel virtual address + * base that maps to the user space virtual address base for the buffer + * allocated in user space. + * The final input/src and output/dst buffer pointer will be determined + * by adding the offsets to the kernel virtual addr. + * + * If use of hardware key is supported in the target, user can configure the + * key parameters (encklen, enckey) to use the hardware key. + * In order to use the hardware key, set encklen to 0 and set the enckey + * data array to 0. 
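Tying the fields described above together (an illustrative sketch only, not part of this patch): with use_pmem set to QCEDEV_NO_PMEM the driver copies data to and from the caller's virtual buffers, so a single-entry, in-place AES-128-CBC encryption could be issued roughly as below. The device node name "/dev/qce" and the key/IV handling are assumptions.

/*
 * Hedged example: in-place AES-128-CBC encryption of one buffer through
 * QCEDEV_IOCTL_ENC_REQ, using the virtual-buffer (non-PMEM) path.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>          /* assumed install path of this header */

static int qcedev_encrypt_cbc(__u8 *buf, __u32 len,
                              const __u8 key[16], const __u8 iv[16])
{
        struct qcedev_cipher_op_req req;
        int fd, ret;

        fd = open("/dev/qce", O_RDWR);     /* node name is an assumption */
        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.use_pmem = QCEDEV_NO_PMEM;     /* virtual buffers, kernel copies */
        req.entries = 1;
        req.data_len = len;                /* multiple of the AES block size */
        req.in_place_op = 1;               /* source == destination */
        req.vbuf.src[0].vaddr = buf;
        req.vbuf.src[0].len = len;
        req.vbuf.dst[0].vaddr = buf;
        req.vbuf.dst[0].len = len;
        memcpy(req.enckey, key, 16);
        req.encklen = 16;                  /* 0 with a zeroed enckey selects the HW key */
        memcpy(req.iv, iv, 16);
        req.ivlen = 16;
        req.alg = QCEDEV_ALG_AES;
        req.mode = QCEDEV_AES_MODE_CBC;
        req.op = QCEDEV_OPER_ENC;

        ret = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
        close(fd);
        return ret;
}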
+ */ +struct qcedev_cipher_op_req { + __u8 use_pmem; + union { + struct qcedev_pmem_info pmem; + struct qcedev_vbuf_info vbuf; + }; + __u32 entries; + __u32 data_len; + __u8 in_place_op; + __u8 enckey[QCEDEV_MAX_KEY_SIZE]; + __u32 encklen; + __u8 iv[QCEDEV_MAX_IV_SIZE]; + __u32 ivlen; + __u32 byteoffset; + enum qcedev_cipher_alg_enum alg; + enum qcedev_cipher_mode_enum mode; + enum qcedev_oper_enum op; +}; + +/** + * struct qcedev_sha_op_req - Holds the hashing request information + * @data (IN): Array of pointers to the data to be hashed + * @entries (IN): Number of buf_info entries in the data array + * @data_len (IN): Length of data to be hashed + * @digest (IN/OUT): Returns the hashed data information + * @diglen (OUT): Size of the hashed/digest data + * @authkey (IN): Pointer to authentication key for HMAC + * @authklen (IN): Size of the authentication key + * @alg (IN): Secure Hash algorithm + */ +struct qcedev_sha_op_req { + struct buf_info data[QCEDEV_MAX_BUFFERS]; + __u32 entries; + __u32 data_len; + __u8 digest[QCEDEV_MAX_SHA_DIGEST]; + __u32 diglen; + __u8 *authkey; + __u32 authklen; + enum qcedev_sha_alg_enum alg; +}; + +/** + * struct qfips_verify_t - Holds data for FIPS Integrity test + * @kernel_size (IN): Size of kernel Image + * @kernel (IN): pointer to buffer containing the kernel Image + */ +struct qfips_verify_t { + unsigned int kernel_size; + void *kernel; +}; + +/** + * struct qcedev_map_buf_req - Holds the mapping request information + * fd (IN): Array of fds. + * num_fds (IN): Number of fds in fd[]. + * fd_size (IN): Array of sizes corresponding to each fd in fd[]. + * fd_offset (IN): Array of offset corresponding to each fd in fd[]. + * vaddr (OUT): Array of mapped virtual address corresponding to + * each fd in fd[]. + */ +struct qcedev_map_buf_req { + __s32 fd[QCEDEV_MAX_BUFFERS]; + __u32 num_fds; + __u32 fd_size[QCEDEV_MAX_BUFFERS]; + __u32 fd_offset[QCEDEV_MAX_BUFFERS]; + __u64 buf_vaddr[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_unmap_buf_req - Holds the hashing request information + * fd (IN): Array of fds to unmap + * num_fds (IN): Number of fds in fd[]. + */ +struct qcedev_unmap_buf_req { + __s32 fd[QCEDEV_MAX_BUFFERS]; + __u32 num_fds; +}; + +struct file; + +#define QCEDEV_IOC_MAGIC 0x87 + +#define QCEDEV_IOCTL_ENC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req) +#define QCEDEV_IOCTL_DEC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req) +#define QCEDEV_IOCTL_SHA_INIT_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_SHA_UPDATE_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_SHA_FINAL_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_GET_SHA_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_LOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 7) +#define QCEDEV_IOCTL_UNLOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 8) +#define QCEDEV_IOCTL_GET_CMAC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_MAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req) +#define QCEDEV_IOCTL_UNMAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req) +#endif /* _QCEDEV__H */ diff --git a/crypto-qti/linux/qcota.h b/crypto-qti/linux/qcota.h new file mode 100644 index 0000000000..1a1682e5e5 --- /dev/null +++ b/crypto-qti/linux/qcota.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019-2020, The Linux Foundation. 
All rights reserved. + */ + +#ifndef _UAPI_QCOTA_H +#define _UAPI_QCOTA_H + +#include +#include + +#define QCE_OTA_MAX_BEARER 31 +#define OTA_KEY_SIZE 16 /* 128 bits of keys. */ + +enum qce_ota_dir_enum { + QCE_OTA_DIR_UPLINK = 0, + QCE_OTA_DIR_DOWNLINK = 1, + QCE_OTA_DIR_LAST +}; + +enum qce_ota_algo_enum { + QCE_OTA_ALGO_KASUMI = 0, + QCE_OTA_ALGO_SNOW3G = 1, + QCE_OTA_ALGO_LAST +}; + +/** + * struct qce_f8_req - qce f8 request + * @data_in: packets input data stream to be ciphered. + * If NULL, streaming mode operation. + * @data_out: ciphered packets output data. + * @data_len: length of data_in and data_out in bytes. + * @count_c: count-C, ciphering sequence number, 32 bit + * @bearer: 5 bit of radio bearer identifier. + * @ckey: 128 bits of confidentiality key, + * ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0. + * @direction: uplink or donwlink. + * @algorithm: Kasumi, or Snow3G. + * + * If data_in is NULL, the engine will run in a special mode called + * key stream mode. In this special mode, the engine will generate + * key stream output for the number of bytes specified in the + * data_len, based on the input parameters of direction, algorithm, + * ckey, bearer, and count_c. The data_len is restricted to + * the length of multiple of 16 bytes. Application can then take the + * output stream, do a exclusive or to the input data stream, and + * generate the final cipher data stream. + */ +struct qce_f8_req { + __u8 *data_in; + __u8 *data_out; + __u16 data_len; + __u32 count_c; + __u8 bearer; + __u8 ckey[OTA_KEY_SIZE]; + enum qce_ota_dir_enum direction; + enum qce_ota_algo_enum algorithm; +}; + +/** + * struct qce_f8_multi_pkt_req - qce f8 multiple packet request + * Muliptle packets with uniform size, and + * F8 ciphering parameters can be ciphered in a + * single request. + * + * @num_pkt: number of packets. + * + * @cipher_start: ciphering starts offset within a packet. + * + * @cipher_size: number of bytes to be ciphered within a packet. + * + * @qce_f8_req: description of the packet and F8 parameters. + * The following fields have special meaning for + * multiple packet operation, + * + * @data_len: data_len indicates the length of a packet. + * + * @data_in: packets are concatenated together in a byte + * stream started at data_in. + * + * @data_out: The returned ciphered output for multiple + * packets. + * Each packet ciphered output are concatenated + * together into a byte stream started at data_out. + * Note, each ciphered packet output area from + * offset 0 to cipher_start-1, and from offset + * cipher_size to data_len -1 are remained + * unaltered from packet input area. + * @count_c: count-C of the first packet, 32 bit. + * + * + * In one request, multiple packets can be ciphered, and output to the + * data_out stream. + * + * Packet data are laid out contiguously in sequence in data_in, + * and data_out area. Every packet is identical size. + * If the PDU is not byte aligned, set the data_len value of + * to the rounded up value of the packet size. Eg, PDU size of + * 253 bits, set the packet size to 32 bytes. Next packet starts on + * the next byte boundary. + * + * For each packet, data from offset 0 to cipher_start + * will be left unchanged and output to the data_out area. + * This area of the packet can be for the RLC header, which is not + * to be ciphered. + * + * The ciphering of a packet starts from offset cipher_start, for + * cipher_size bytes of data. 
Data starting from + * offset cipher_start + cipher_size to the end of packet will be left + * unchanged and output to the dataOut area. + * + * For each packet the input arguments of bearer, direction, + * ckey, algorithm have to be the same. count_c is the ciphering sequence + * number of the first packet. The 2nd packet's ciphering sequence + * number is assumed to be count_c + 1. The 3rd packet's ciphering sequence + * number is count_c + 2..... + * + */ +struct qce_f8_multi_pkt_req { + __u16 num_pkt; + __u16 cipher_start; + __u16 cipher_size; + struct qce_f8_req qce_f8_req; +}; + +/** + * struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request + * Muliptle packets with variable size, and + * F8 ciphering parameters can be ciphered in a + * single request. + * + * @num_pkt: number of packets. + * + * @cipher_iov[]: array of iov of packets to be ciphered. + * + * + * @qce_f8_req: description of the packet and F8 parameters. + * The following fields have special meaning for + * multiple packet operation, + * + * @data_len: ignored. + * + * @data_in: ignored. + * + * @data_out: ignored. + * + * @count_c: count-C of the first packet, 32 bit. + * + * + * In one request, multiple packets can be ciphered. + * + * The i-th packet are defined in cipher_iov[i-1]. + * The ciphering of i-th packet starts from offset 0 of the PDU specified + * by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data. + * If the PDU is not byte aligned, set the cipher_iov[i-1].size value + * to the rounded up value of the packet size. Eg, PDU size of + * 253 bits, set the packet size to 32 bytes. + * + * Ciphering are done in place. That is, the ciphering + * input and output data are both in cipher_iov[i-1].addr for the i-th + * packet. + * + * For each packet the input arguments of bearer, direction, + * ckey, algorithm have to be the same. count_c is the ciphering sequence + * number of the first packet. The 2nd packet's ciphering sequence + * number is assumed to be count_c + 1. The 3rd packet's ciphering sequence + * number is count_c + 2..... + */ + +#define MAX_NUM_V_MULTI_PKT 20 +struct cipher_iov { + unsigned char *addr; + unsigned short size; +}; + +struct qce_f8_variable_multi_pkt_req { + unsigned short num_pkt; + struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT]; + struct qce_f8_req qce_f8_req; +}; + +/** + * struct qce_f9_req - qce f9 request + * @message: message + * @msize: message size in bytes (include the last partial byte). + * @last_bits: valid bits in the last byte of message. + * @mac_i: 32 bit message authentication code, to be returned. + * @fresh: random 32 bit number, one per user. + * @count_i: 32 bit count-I integrity sequence number. + * @direction: uplink or donwlink. + * @ikey: 128 bits of integrity key, + * ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0. + * @algorithm: Kasumi, or Snow3G. 
+ */ +struct qce_f9_req { + __u8 *message; + __u16 msize; + __u8 last_bits; + __u32 mac_i; + __u32 fresh; + __u32 count_i; + enum qce_ota_dir_enum direction; + __u8 ikey[OTA_KEY_SIZE]; + enum qce_ota_algo_enum algorithm; +}; + +#define QCOTA_IOC_MAGIC 0x85 + +#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req) +#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req) +#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req) +#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\ + struct qce_f8_variable_multi_pkt_req) + +#endif /* _UAPI_QCOTA_H */ diff --git a/crypto-qti/linux/qcrypto.h b/crypto-qti/linux/qcrypto.h new file mode 100644 index 0000000000..4c034a9c1e --- /dev/null +++ b/crypto-qti/linux/qcrypto.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ +#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ + +#include +#include +#include +#include + +#define QCRYPTO_CTX_KEY_MASK 0x000000ff +#define QCRYPTO_CTX_USE_HW_KEY 0x00000001 +#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002 + +#define QCRYPTO_CTX_XTS_MASK 0x0000ff00 +#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100 +#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200 + + +int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev); +int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev); +int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev); + +int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags); +int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags); +int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags); + +int qcrypto_cipher_clear_flag(struct skcipher_request *req, + unsigned int flags); +int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags); +int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags); + +struct crypto_engine_entry { + u32 hw_instance; + u32 ce_device; + int shared; +}; + +int qcrypto_get_num_engines(void); +void qcrypto_get_engine_list(size_t num_engines, + struct crypto_engine_entry *arr); +int qcrypto_cipher_set_device_hw(struct skcipher_request *req, + unsigned int fde_pfe, + unsigned int hw_inst); + + +struct qcrypto_func_set { + int (*cipher_set)(struct skcipher_request *req, + unsigned int fde_pfe, + unsigned int hw_inst); + int (*cipher_flag)(struct skcipher_request *req, unsigned int flags); + int (*get_num_engines)(void); + void (*get_engine_list)(size_t num_engines, + struct crypto_engine_entry *arr); +}; + +#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H */ diff --git a/crypto-qti/ota_crypto.c b/crypto-qti/ota_crypto.c new file mode 100644 index 0000000000..bce36e7430 --- /dev/null +++ b/crypto-qti/ota_crypto.c @@ -0,0 +1,994 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI Over the Air (OTA) Crypto driver + * + * Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved. 
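For the OTA interface declared above (again an illustrative sketch, not part of this patch), a caller could request a KASUMI f9 MAC-I through the qcota character device. The node name /dev/qcota0 matches the QCOTA_DEV definition below; the count_i and fresh values are placeholders supplied by the caller, and the header install path is an assumption.

/*
 * Hedged example: compute a KASUMI f9 integrity MAC over a byte-aligned
 * message via QCOTA_F9_REQ. All 8 bits of the last message byte are
 * treated as valid in this sketch.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcota.h>           /* assumed install path of this header */

static int ota_f9_mac(__u8 *msg, __u16 msize, const __u8 ikey[OTA_KEY_SIZE],
                      __u32 count_i, __u32 fresh, __u32 *mac_out)
{
        struct qce_f9_req req;
        int fd, ret;

        fd = open("/dev/qcota0", O_RDWR);
        if (fd < 0)
                return -1;

        memset(&req, 0, sizeof(req));
        req.message = msg;
        req.msize = msize;
        req.last_bits = 8;
        req.fresh = fresh;
        req.count_i = count_i;
        req.direction = QCE_OTA_DIR_UPLINK;
        memcpy(req.ikey, ikey, OTA_KEY_SIZE);
        req.algorithm = QCE_OTA_ALGO_KASUMI;

        ret = ioctl(fd, QCOTA_F9_REQ, &req);
        if (!ret)
                *mac_out = req.mac_i;      /* MAC-I written back by the driver */
        close(fd);
        return ret;
}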
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "linux/qcota.h" +#include "qce.h" +#include "qce_ota.h" + +enum qce_ota_oper_enum { + QCE_OTA_F8_OPER = 0, + QCE_OTA_MPKT_F8_OPER = 1, + QCE_OTA_F9_OPER = 2, + QCE_OTA_VAR_MPKT_F8_OPER = 3, + QCE_OTA_OPER_LAST +}; + +struct ota_dev_control; + +struct ota_async_req { + struct list_head rlist; + struct completion complete; + int err; + enum qce_ota_oper_enum op; + union { + struct qce_f9_req f9_req; + struct qce_f8_req f8_req; + struct qce_f8_multi_pkt_req f8_mp_req; + struct qce_f8_variable_multi_pkt_req f8_v_mp_req; + } req; + unsigned int steps; + struct ota_qce_dev *pqce; +}; + +/* + * Register ourselves as a char device /dev/qcota0 to be able to access the ota + * from userspace. + */ + + +#define QCOTA_DEV "qcota0" + + +struct ota_dev_control { + + /* char device */ + struct cdev cdev; + int minor; + struct list_head ready_commands; + unsigned int magic; + struct list_head qce_dev; + spinlock_t lock; + struct mutex register_lock; + bool registered; + uint32_t total_units; +}; + +struct ota_qce_dev { + struct list_head qlist; + /* qce handle */ + void *qce; + + /* platform device */ + struct platform_device *pdev; + + struct ota_async_req *active_command; + struct tasklet_struct done_tasklet; + struct ota_dev_control *podev; + uint32_t unit; + u64 total_req; + u64 err_req; +}; + +#define OTA_MAGIC 0x4f544143 + +static long qcota_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +static int qcota_open(struct inode *inode, struct file *file); +static int qcota_release(struct inode *inode, struct file *file); +static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq); +static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret); + +static const struct file_operations qcota_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qcota_ioctl, + .open = qcota_open, + .release = qcota_release, +}; + +static struct ota_dev_control qcota_dev = { + .magic = OTA_MAGIC, +}; + +static dev_t qcota_device_no; +static struct class *driver_class; +static struct device *class_dev; + +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 1024 + +struct qcota_stat { + u64 f8_req; + u64 f8_mp_req; + u64 f8_v_mp_req; + u64 f9_req; + u64 f8_op_success; + u64 f8_op_fail; + u64 f8_mp_op_success; + u64 f8_mp_op_fail; + u64 f8_v_mp_op_success; + u64 f8_v_mp_op_fail; + u64 f9_op_success; + u64 f9_op_fail; +}; +static struct qcota_stat _qcota_stat; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +static int _debug_qcota; + +static struct ota_dev_control *qcota_control(void) +{ + + return &qcota_dev; +} + +static int qcota_open(struct inode *inode, struct file *file) +{ + struct ota_dev_control *podev; + + podev = qcota_control(); + if (podev == NULL) { + pr_err("%s: no such device %d\n", __func__, + MINOR(inode->i_rdev)); + return -ENOENT; + } + + file->private_data = podev; + + return 0; +} + +static int qcota_release(struct inode *inode, struct file *file) +{ + struct ota_dev_control *podev; + + podev = file->private_data; + + if (podev != NULL && podev->magic != OTA_MAGIC) { + pr_err("%s: invalid handle %pK\n", + __func__, podev); + } + + file->private_data = NULL; + + return 0; +} + +static bool _next_v_mp_req(struct ota_async_req *areq) +{ + unsigned char *p; + + if (areq->err) + return false; + if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt) + 
return false; + + p = areq->req.f8_v_mp_req.qce_f8_req.data_in; + p += areq->req.f8_v_mp_req.qce_f8_req.data_len; + p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES); + + areq->req.f8_v_mp_req.qce_f8_req.data_out = p; + areq->req.f8_v_mp_req.qce_f8_req.data_in = p; + areq->req.f8_v_mp_req.qce_f8_req.data_len = + areq->req.f8_v_mp_req.cipher_iov[areq->steps].size; + + areq->req.f8_v_mp_req.qce_f8_req.count_c++; + return true; +} + +static void req_done(unsigned long data) +{ + struct ota_qce_dev *pqce = (struct ota_qce_dev *)data; + struct ota_dev_control *podev = pqce->podev; + struct ota_async_req *areq; + unsigned long flags; + struct ota_async_req *new_req = NULL; + int ret = 0; + bool schedule = true; + + spin_lock_irqsave(&podev->lock, flags); + areq = pqce->active_command; + if (unlikely(areq == NULL)) + pr_err("ota_crypto: %s, no active request\n", __func__); + else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) { + if (_next_v_mp_req(areq)) { + /* execute next subcommand */ + spin_unlock_irqrestore(&podev->lock, flags); + ret = start_req(pqce, areq); + if (unlikely(ret)) { + areq->err = ret; + schedule = true; + spin_lock_irqsave(&podev->lock, flags); + } else { + areq = NULL; + schedule = false; + } + } else { + /* done with this variable mp req */ + schedule = true; + } + } + while (schedule) { + if (!list_empty(&podev->ready_commands)) { + new_req = container_of(podev->ready_commands.next, + struct ota_async_req, rlist); + list_del(&new_req->rlist); + pqce->active_command = new_req; + spin_unlock_irqrestore(&podev->lock, flags); + + if (new_req) { + new_req->err = 0; + /* start a new request */ + ret = start_req(pqce, new_req); + } + if (unlikely(new_req && ret)) { + new_req->err = ret; + complete(&new_req->complete); + ret = 0; + new_req = NULL; + spin_lock_irqsave(&podev->lock, flags); + } else { + schedule = false; + } + } else { + pqce->active_command = NULL; + spin_unlock_irqrestore(&podev->lock, flags); + schedule = false; + } + } + if (areq) + complete(&areq->complete); +} + +static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv, + int ret) +{ + struct ota_async_req *areq = (struct ota_async_req *) cookie; + struct ota_qce_dev *pqce; + + pqce = areq->pqce; + areq->req.f9_req.mac_i = *((uint32_t *)icv); + + if (ret) { + pqce->err_req++; + areq->err = -ENXIO; + } else + areq->err = 0; + + tasklet_schedule(&pqce->done_tasklet); +} + +static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, + int ret) +{ + struct ota_async_req *areq = (struct ota_async_req *) cookie; + struct ota_qce_dev *pqce; + + pqce = areq->pqce; + + if (ret) { + pqce->err_req++; + areq->err = -ENXIO; + } else { + areq->err = 0; + } + + tasklet_schedule(&pqce->done_tasklet); +} + +static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq) +{ + struct qce_f9_req *pf9; + struct qce_f8_multi_pkt_req *p_mp_f8; + struct qce_f8_req *pf8; + int ret = 0; + + /* command should be on the podev->active_command */ + areq->pqce = pqce; + + switch (areq->op) { + case QCE_OTA_F8_OPER: + pf8 = &areq->req.f8_req; + ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb); + break; + case QCE_OTA_MPKT_F8_OPER: + p_mp_f8 = &areq->req.f8_mp_req; + ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb); + break; + + case QCE_OTA_F9_OPER: + pf9 = &areq->req.f9_req; + ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb); + break; + + case QCE_OTA_VAR_MPKT_F8_OPER: + pf8 = &areq->req.f8_v_mp_req.qce_f8_req; + ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb); + break; + + default: + ret = -ENOTSUPP; + 
break; + } + areq->err = ret; + pqce->total_req++; + if (ret) + pqce->err_req++; + return ret; +} + +static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev) +{ + /* do this function with spinlock set */ + struct ota_qce_dev *p; + + if (unlikely(list_empty(&podev->qce_dev))) { + pr_err("%s: no valid qce to schedule\n", __func__); + return NULL; + } + + list_for_each_entry(p, &podev->qce_dev, qlist) { + if (p->active_command == NULL) + return p; + } + return NULL; +} + +static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev) +{ + unsigned long flags; + int ret = 0; + struct qcota_stat *pstat; + struct ota_qce_dev *pqce; + + areq->err = 0; + + spin_lock_irqsave(&podev->lock, flags); + pqce = schedule_qce(podev); + if (pqce) { + pqce->active_command = areq; + spin_unlock_irqrestore(&podev->lock, flags); + + ret = start_req(pqce, areq); + if (ret != 0) { + spin_lock_irqsave(&podev->lock, flags); + pqce->active_command = NULL; + spin_unlock_irqrestore(&podev->lock, flags); + } + + } else { + list_add_tail(&areq->rlist, &podev->ready_commands); + spin_unlock_irqrestore(&podev->lock, flags); + } + + if (ret == 0) + wait_for_completion(&areq->complete); + + pstat = &_qcota_stat; + switch (areq->op) { + case QCE_OTA_F8_OPER: + if (areq->err) + pstat->f8_op_fail++; + else + pstat->f8_op_success++; + break; + + case QCE_OTA_MPKT_F8_OPER: + + if (areq->err) + pstat->f8_mp_op_fail++; + else + pstat->f8_mp_op_success++; + break; + + case QCE_OTA_F9_OPER: + if (areq->err) + pstat->f9_op_fail++; + else + pstat->f9_op_success++; + break; + case QCE_OTA_VAR_MPKT_F8_OPER: + default: + if (areq->err) + pstat->f8_v_mp_op_fail++; + else + pstat->f8_v_mp_op_success++; + break; + } + + return areq->err; +} + +static long qcota_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct ota_dev_control *podev; + uint8_t *user_src; + uint8_t *user_dst; + uint8_t *k_buf = NULL; + struct ota_async_req areq; + uint32_t total, temp; + struct qcota_stat *pstat; + int i; + uint8_t *p = NULL; + + podev = file->private_data; + if (podev == NULL || podev->magic != OTA_MAGIC) { + pr_err("%s: invalid handle %pK\n", + __func__, podev); + return -ENOENT; + } + + /* Verify user arguments. 
*/ + if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC) + return -ENOTTY; + + init_completion(&areq.complete); + + pstat = &_qcota_stat; + + switch (cmd) { + case QCOTA_F9_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f9_req))) + return -EFAULT; + if (copy_from_user(&areq.req.f9_req, (void __user *)arg, + sizeof(struct qce_f9_req))) + return -EFAULT; + + user_src = areq.req.f9_req.message; + if (!access_ok(VERIFY_READ, (void __user *)user_src, + areq.req.f9_req.msize)) + return -EFAULT; + + if (areq.req.f9_req.msize == 0) + return 0; + + k_buf = memdup_user((const void __user *)user_src, + areq.req.f9_req.msize); + if (IS_ERR(k_buf)) + return -EFAULT; + + areq.req.f9_req.message = k_buf; + areq.op = QCE_OTA_F9_OPER; + + pstat->f9_req++; + err = submit_req(&areq, podev); + + areq.req.f9_req.message = user_src; + if (err == 0 && copy_to_user((void __user *)arg, + &areq.req.f9_req, sizeof(struct qce_f9_req))) { + err = -EFAULT; + } + kfree(k_buf); + break; + + case QCOTA_F8_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f8_req))) + return -EFAULT; + if (copy_from_user(&areq.req.f8_req, (void __user *)arg, + sizeof(struct qce_f8_req))) + return -EFAULT; + total = areq.req.f8_req.data_len; + user_src = areq.req.f8_req.data_in; + if (user_src != NULL) { + if (!access_ok(VERIFY_READ, (void __user *) + user_src, total)) + return -EFAULT; + + } + + user_dst = areq.req.f8_req.data_out; + if (!access_ok(VERIFY_WRITE, (void __user *) + user_dst, total)) + return -EFAULT; + + if (!total) + return 0; + k_buf = kmalloc(total, GFP_KERNEL); + if (k_buf == NULL) + return -ENOMEM; + + /* k_buf returned from kmalloc should be cache line aligned */ + if (user_src && copy_from_user(k_buf, + (void __user *)user_src, total)) { + kfree(k_buf); + return -EFAULT; + } + + if (user_src) + areq.req.f8_req.data_in = k_buf; + else + areq.req.f8_req.data_in = NULL; + areq.req.f8_req.data_out = k_buf; + + areq.op = QCE_OTA_F8_OPER; + + pstat->f8_req++; + err = submit_req(&areq, podev); + + if (err == 0 && copy_to_user(user_dst, k_buf, total)) + err = -EFAULT; + kfree(k_buf); + + break; + + case QCOTA_F8_MPKT_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f8_multi_pkt_req))) + return -EFAULT; + if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg, + sizeof(struct qce_f8_multi_pkt_req))) + return -EFAULT; + temp = areq.req.f8_mp_req.qce_f8_req.data_len; + if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start + + areq.req.f8_mp_req.cipher_size) + return -EINVAL; + total = (uint32_t) areq.req.f8_mp_req.num_pkt * + areq.req.f8_mp_req.qce_f8_req.data_len; + + user_src = areq.req.f8_mp_req.qce_f8_req.data_in; + if (!access_ok(VERIFY_READ, (void __user *) + user_src, total)) + return -EFAULT; + + user_dst = areq.req.f8_mp_req.qce_f8_req.data_out; + if (!access_ok(VERIFY_WRITE, (void __user *) + user_dst, total)) + return -EFAULT; + + if (!total) + return 0; + /* k_buf should be cache line aligned */ + k_buf = memdup_user((const void __user *)user_src, total); + if (IS_ERR(k_buf)) + return -EFAULT; + + areq.req.f8_mp_req.qce_f8_req.data_out = k_buf; + areq.req.f8_mp_req.qce_f8_req.data_in = k_buf; + + areq.op = QCE_OTA_MPKT_F8_OPER; + + pstat->f8_mp_req++; + err = submit_req(&areq, podev); + + if (err == 0 && copy_to_user(user_dst, k_buf, total)) + err = -EFAULT; + kfree(k_buf); + break; + + case QCOTA_F8_V_MPKT_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f8_variable_multi_pkt_req))) + return -EFAULT; + if 
(copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg, + sizeof(struct qce_f8_variable_multi_pkt_req))) + return -EFAULT; + + if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT) + return -EINVAL; + + for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) { + if (!access_ok(VERIFY_WRITE, (void __user *) + areq.req.f8_v_mp_req.cipher_iov[i].addr, + areq.req.f8_v_mp_req.cipher_iov[i].size)) + return -EFAULT; + total += areq.req.f8_v_mp_req.cipher_iov[i].size; + total = ALIGN(total, L1_CACHE_BYTES); + } + + if (!total) + return 0; + k_buf = kmalloc(total, GFP_KERNEL); + if (k_buf == NULL) + return -ENOMEM; + + for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) { + user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr; + if (copy_from_user(p, (void __user *)user_src, + areq.req.f8_v_mp_req.cipher_iov[i].size)) { + kfree(k_buf); + return -EFAULT; + } + p += areq.req.f8_v_mp_req.cipher_iov[i].size; + p = (uint8_t *) ALIGN(((uintptr_t)p), + L1_CACHE_BYTES); + } + + areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf; + areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf; + areq.req.f8_v_mp_req.qce_f8_req.data_len = + areq.req.f8_v_mp_req.cipher_iov[0].size; + areq.steps = 0; + areq.op = QCE_OTA_VAR_MPKT_F8_OPER; + + pstat->f8_v_mp_req++; + err = submit_req(&areq, podev); + + if (err != 0) { + kfree(k_buf); + return err; + } + + for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) { + user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr; + if (copy_to_user(user_dst, p, + areq.req.f8_v_mp_req.cipher_iov[i].size)) { + kfree(k_buf); + return -EFAULT; + } + p += areq.req.f8_v_mp_req.cipher_iov[i].size; + p = (uint8_t *) ALIGN(((uintptr_t)p), + L1_CACHE_BYTES); + } + kfree(k_buf); + break; + default: + return -ENOTTY; + } + + return err; +} + +static int qcota_probe(struct platform_device *pdev) +{ + void *handle = NULL; + int rc = 0; + struct ota_dev_control *podev; + struct ce_hw_support ce_support; + struct ota_qce_dev *pqce; + unsigned long flags; + + podev = &qcota_dev; + pqce = kzalloc(sizeof(*pqce), GFP_KERNEL); + if (!pqce) + return -ENOMEM; + + rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV); + if (rc < 0) { + pr_err("alloc_chrdev_region failed %d\n", rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, QCOTA_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("class_create failed %d\n", rc); + goto exit_unreg_chrdev_region; + } + + class_dev = device_create(driver_class, NULL, qcota_device_no, NULL, + QCOTA_DEV); + if (IS_ERR(class_dev)) { + pr_err("class_device_create failed %d\n", rc); + rc = -ENOMEM; + goto exit_destroy_class; + } + + cdev_init(&podev->cdev, &qcota_fops); + podev->cdev.owner = THIS_MODULE; + + rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1); + if (rc < 0) { + pr_err("cdev_add failed %d\n", rc); + goto exit_destroy_device; + } + podev->minor = 0; + + pqce->podev = podev; + pqce->active_command = NULL; + tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce); + + /* open qce */ + handle = qce_open(pdev, &rc); + if (handle == NULL) { + pr_err("%s: device %s, can not open qce\n", + __func__, pdev->name); + goto exit_del_cdev; + } + if (qce_hw_support(handle, &ce_support) < 0 || + !ce_support.ota) { + pr_err("%s: device %s, qce does not support ota capability\n", + __func__, pdev->name); + rc = -ENODEV; + goto err; + } + pqce->qce = handle; + pqce->pdev = pdev; + pqce->total_req = 0; + pqce->err_req = 0; + platform_set_drvdata(pdev, pqce); + + mutex_lock(&podev->register_lock); + rc = 0; + if 
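The variable multi-packet path above packs every cipher_iov segment into one kernel buffer, rounding each packet up to an L1 cache line so that the next packet starts cache-line aligned, the same alignment _next_v_mp_req uses when it advances data_in/data_out. A small sketch of that size calculation, with a hypothetical helper name:

#include <linux/cache.h>   /* L1_CACHE_BYTES */
#include <linux/kernel.h>  /* ALIGN() */

/*
 * Hypothetical helper: bytes needed to pack @num_pkt segments of the given
 * sizes back-to-back, each segment starting on an L1 cache-line boundary.
 */
static size_t example_packed_len(const uint32_t *sizes, unsigned int num_pkt)
{
	size_t total = 0;
	unsigned int i;

	for (i = 0; i < num_pkt; i++) {
		total += sizes[i];
		total = ALIGN(total, L1_CACHE_BYTES);
	}
	return total;
}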
(!podev->registered) { + if (rc == 0) { + pqce->unit = podev->total_units; + podev->total_units++; + podev->registered = true; + } + } else { + pqce->unit = podev->total_units; + podev->total_units++; + } + mutex_unlock(&podev->register_lock); + if (rc) { + pr_err("ion: failed to register misc device.\n"); + goto err; + } + + spin_lock_irqsave(&podev->lock, flags); + list_add_tail(&pqce->qlist, &podev->qce_dev); + spin_unlock_irqrestore(&podev->lock, flags); + + return 0; +err: + if (handle) + qce_close(handle); + + platform_set_drvdata(pdev, NULL); + tasklet_kill(&pqce->done_tasklet); + +exit_del_cdev: + cdev_del(&podev->cdev); +exit_destroy_device: + device_destroy(driver_class, qcota_device_no); +exit_destroy_class: + class_destroy(driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(qcota_device_no, 1); + + kfree(pqce); + return rc; +} + +static int qcota_remove(struct platform_device *pdev) +{ + struct ota_dev_control *podev; + struct ota_qce_dev *pqce; + unsigned long flags; + + pqce = platform_get_drvdata(pdev); + if (!pqce) + return 0; + if (pqce->qce) + qce_close(pqce->qce); + + podev = pqce->podev; + if (!podev) + goto ret; + + spin_lock_irqsave(&podev->lock, flags); + list_del(&pqce->qlist); + spin_unlock_irqrestore(&podev->lock, flags); + + mutex_lock(&podev->register_lock); + if (--podev->total_units == 0) { + cdev_del(&podev->cdev); + device_destroy(driver_class, qcota_device_no); + class_destroy(driver_class); + unregister_chrdev_region(qcota_device_no, 1); + podev->registered = false; + } + mutex_unlock(&podev->register_lock); +ret: + + tasklet_kill(&pqce->done_tasklet); + kfree(pqce); + return 0; +} + +static const struct of_device_id qcota_match[] = { + { .compatible = "qcom,qcota", + }, + {} +}; + +static struct platform_driver qcota_plat_driver = { + .probe = qcota_probe, + .remove = qcota_remove, + .driver = { + .name = "qcota", + .of_match_table = qcota_match, + }, +}; + +static int _disp_stats(void) +{ + struct qcota_stat *pstat; + int len = 0; + struct ota_dev_control *podev = &qcota_dev; + unsigned long flags; + struct ota_qce_dev *p; + + pstat = &_qcota_stat; + len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, + "\nQTI OTA crypto accelerator Statistics:\n"); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 request : %llu\n", + pstat->f8_req); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 operation success : %llu\n", + pstat->f8_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 operation fail : %llu\n", + pstat->f8_op_fail); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP request : %llu\n", + pstat->f8_mp_req); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP operation success : %llu\n", + pstat->f8_mp_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP operation fail : %llu\n", + pstat->f8_mp_op_fail); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 Variable MP request : %llu\n", + pstat->f8_v_mp_req); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 Variable MP operation success: %llu\n", + pstat->f8_v_mp_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 Variable MP operation fail : %llu\n", + pstat->f8_v_mp_op_fail); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 request : %llu\n", + pstat->f9_req); + len += 
scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 operation success : %llu\n", + pstat->f9_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 operation fail : %llu\n", + pstat->f9_op_fail); + + spin_lock_irqsave(&podev->lock, flags); + + list_for_each_entry(p, &podev->qce_dev, qlist) { + len += scnprintf( + _debug_read_buf + len, + DEBUG_MAX_RW_BUF - len - 1, + " Engine %4d Req : %llu\n", + p->unit, + p->total_req + ); + len += scnprintf( + _debug_read_buf + len, + DEBUG_MAX_RW_BUF - len - 1, + " Engine %4d Req Error : %llu\n", + p->unit, + p->err_req + ); + } + + spin_unlock_irqrestore(&podev->lock, flags); + + return len; +} + +static ssize_t _debug_stats_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int rc = -EINVAL; + int len; + + len = _disp_stats(); + if (len <= count) + rc = simple_read_from_buffer((void __user *) buf, len, + ppos, (void *) _debug_read_buf, len); + + return rc; +} + +static ssize_t _debug_stats_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ota_dev_control *podev = &qcota_dev; + unsigned long flags; + struct ota_qce_dev *p; + + memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat)); + + spin_lock_irqsave(&podev->lock, flags); + + list_for_each_entry(p, &podev->qce_dev, qlist) { + p->total_req = 0; + p->err_req = 0; + } + + spin_unlock_irqrestore(&podev->lock, flags); + + return count; +} + +static const struct file_operations _debug_stats_ops = { + .open = simple_open, + .read = _debug_stats_read, + .write = _debug_stats_write, +}; + +static int _qcota_debug_init(void) +{ + int rc; + char name[DEBUG_MAX_FNAME]; + struct dentry *dent; + + _debug_dent = debugfs_create_dir("qcota", NULL); + if (IS_ERR(_debug_dent)) { + pr_err("qcota debugfs_create_dir fail, error %ld\n", + PTR_ERR(_debug_dent)); + return PTR_ERR(_debug_dent); + } + + snprintf(name, DEBUG_MAX_FNAME-1, "stats-0"); + _debug_qcota = 0; + dent = debugfs_create_file(name, 0644, _debug_dent, + &_debug_qcota, &_debug_stats_ops); + if (dent == NULL) { + pr_err("qcota debugfs_create_file fail, error %ld\n", + PTR_ERR(dent)); + rc = PTR_ERR(dent); + goto err; + } + return 0; +err: + debugfs_remove_recursive(_debug_dent); + return rc; +} + +static int __init qcota_init(void) +{ + int rc; + struct ota_dev_control *podev; + + rc = _qcota_debug_init(); + if (rc) + return rc; + + podev = &qcota_dev; + INIT_LIST_HEAD(&podev->ready_commands); + INIT_LIST_HEAD(&podev->qce_dev); + spin_lock_init(&podev->lock); + mutex_init(&podev->register_lock); + podev->registered = false; + podev->total_units = 0; + + return platform_driver_register(&qcota_plat_driver); +} +static void __exit qcota_exit(void) +{ + debugfs_remove_recursive(_debug_dent); + platform_driver_unregister(&qcota_plat_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QTI Ota Crypto driver"); + +module_init(qcota_init); +module_exit(qcota_exit); diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h new file mode 100644 index 0000000000..c3d06b8739 --- /dev/null +++ b/crypto-qti/qce.h @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QTI Crypto Engine driver API + * + * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved. 
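For reference, the statistics assembled by _disp_stats() are exposed through debugfs: reading the qcota/stats-0 file returns the formatted counters, and writing anything to it clears them. A minimal userspace read, assuming debugfs is mounted at the conventional /sys/kernel/debug location:

#include <stdio.h>

int main(void)
{
	char line[256];
	/* Path assumes the conventional debugfs mount point. */
	FILE *f = fopen("/sys/kernel/debug/qcota/stats-0", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}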
+ */ + +#ifndef __CRYPTO_MSM_QCE_H +#define __CRYPTO_MSM_QCE_H + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* SHA digest size in bytes */ +#define SHA256_DIGESTSIZE 32 +#define SHA1_DIGESTSIZE 20 + +#define AES_CE_BLOCK_SIZE 16 + +/* key size in bytes */ +#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */ +#define SHA_HMAC_KEY_SIZE 64 +#define DES_KEY_SIZE 8 +#define TRIPLE_DES_KEY_SIZE 24 +#define AES128_KEY_SIZE 16 +#define AES192_KEY_SIZE 24 +#define AES256_KEY_SIZE 32 +#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE + +/* iv length in bytes */ +#define AES_IV_LENGTH 16 +#define DES_IV_LENGTH 8 +#define MAX_IV_LENGTH AES_IV_LENGTH + +/* Maximum number of bytes per transfer */ +#define QCE_MAX_OPER_DATA 0xFF00 + +/* Maximum Nonce bytes */ +#define MAX_NONCE 16 + +/* Crypto clock control flags */ +#define QCE_CLK_ENABLE_FIRST 1 +#define QCE_BW_REQUEST_FIRST 2 +#define QCE_CLK_DISABLE_FIRST 3 +#define QCE_BW_REQUEST_RESET_FIRST 4 + +/* interconnect average and peak bw for crypto device */ +#define CRYPTO_AVG_BW 393600 +#define CRYPTO_PEAK_BW 393600 + +typedef void (*qce_comp_func_ptr_t)(void *areq, + unsigned char *icv, unsigned char *iv, int ret); + +/* Cipher algorithms supported */ +enum qce_cipher_alg_enum { + CIPHER_ALG_DES = 0, + CIPHER_ALG_3DES = 1, + CIPHER_ALG_AES = 2, + CIPHER_ALG_LAST +}; + +/* Hash and hmac algorithms supported */ +enum qce_hash_alg_enum { + QCE_HASH_SHA1 = 0, + QCE_HASH_SHA256 = 1, + QCE_HASH_SHA1_HMAC = 2, + QCE_HASH_SHA256_HMAC = 3, + QCE_HASH_AES_CMAC = 4, + QCE_HASH_LAST +}; + +/* Cipher encryption/decryption operations */ +enum qce_cipher_dir_enum { + QCE_ENCRYPT = 0, + QCE_DECRYPT = 1, + QCE_CIPHER_DIR_LAST +}; + +/* Cipher algorithms modes */ +enum qce_cipher_mode_enum { + QCE_MODE_CBC = 0, + QCE_MODE_ECB = 1, + QCE_MODE_CTR = 2, + QCE_MODE_XTS = 3, + QCE_MODE_CCM = 4, + QCE_CIPHER_MODE_LAST +}; + +/* Cipher operation type */ +enum qce_req_op_enum { + QCE_REQ_ABLK_CIPHER = 0, + QCE_REQ_ABLK_CIPHER_NO_KEY = 1, + QCE_REQ_AEAD = 2, + QCE_REQ_LAST +}; + +/* Algorithms/features supported in CE HW engine */ +struct ce_hw_support { + bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/ + bool sha1_hmac; /* supports max HMAC key of 64 bytes*/ + bool sha256_hmac; /* supports max HMAC key of 64 bytes*/ + bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/ + bool cmac; + bool aes_key_192; + bool aes_xts; + bool aes_ccm; + bool ota; + bool aligned_only; + bool bam; + bool is_shared; + bool hw_key; + bool use_sw_aes_cbc_ecb_ctr_algo; + bool use_sw_aead_algo; + bool use_sw_aes_xts_algo; + bool use_sw_ahash_algo; + bool use_sw_hmac_algo; + bool use_sw_aes_ccm_algo; + bool clk_mgmt_sus_res; + bool req_bw_before_clk; + unsigned int ce_device; + unsigned int ce_hw_instance; + unsigned int max_request; +}; + +/* Sha operation parameters */ +struct qce_sha_req { + qce_comp_func_ptr_t qce_cb; /* call back */ + enum qce_hash_alg_enum alg; /* sha algorithm */ + unsigned char *digest; /* sha digest */ + struct scatterlist *src; /* pointer to scatter list entry */ + uint32_t auth_data[4]; /* byte count */ + unsigned char *authkey; /* auth key */ + unsigned int authklen; /* auth key length */ + bool first_blk; /* first block indicator */ + bool last_blk; /* last block indicator */ + unsigned int size; /* data length in bytes */ + void *areq; + unsigned int flags; +}; + +struct qce_req { + enum qce_req_op_enum op; /* operation type */ + qce_comp_func_ptr_t qce_cb; /* call back 
+ */ + void *areq; + enum qce_cipher_alg_enum alg; /* cipher algorithms*/ + enum qce_cipher_dir_enum dir; /* encryption? decryption? */ + enum qce_cipher_mode_enum mode; /* algorithm mode */ + enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */ + unsigned char *authkey; /* authentication key */ + unsigned int authklen; /* authentication key length */ + unsigned int authsize; /* authentication tag size */ + unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */ + unsigned char *assoc; /* Ptr to formatted associated data */ + unsigned int assoclen; /* Formatted associated data length */ + struct scatterlist *asg; /* Formatted associated data sg */ + unsigned char *enckey; /* cipher key */ + unsigned int encklen; /* cipher key length */ + unsigned char *iv; /* initialization vector */ + unsigned int ivsize; /* initialization vector size*/ + unsigned int cryptlen; /* data length */ + unsigned int use_pmem; /* is source of data PMEM allocated? */ + struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/ + unsigned int flags; +}; + +struct qce_pm_table { + int (*suspend)(void *handle); + int (*resume)(void *handle); +}; + +extern struct qce_pm_table qce_pm_table; + +void *qce_open(struct platform_device *pdev, int *rc); +int qce_close(void *handle); +int qce_aead_req(void *handle, struct qce_req *req); +int qce_ablk_cipher_req(void *handle, struct qce_req *req); +int qce_hw_support(void *handle, struct ce_hw_support *support); +int qce_process_sha_req(void *handle, struct qce_sha_req *s_req); +int qce_enable_clk(void *handle); +int qce_disable_clk(void *handle); +void qce_get_driver_stats(void *handle); +void qce_clear_driver_stats(void *handle); +void qce_dump_req(void *handle); + +#endif /* __CRYPTO_MSM_QCE_H */ diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c new file mode 100644 index 0000000000..6d8df8489e --- /dev/null +++ b/crypto-qti/qce50.c @@ -0,0 +1,6198 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI Crypto Engine driver. + * + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
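The qce_open()/qce_hw_support() pair declared above is how a client driver attaches to an engine and checks its capabilities; qcota_probe() earlier in this patch follows exactly this pattern. A condensed sketch, with an illustrative probe function name:

#include <linux/platform_device.h>
#include "qce.h"

static int example_probe(struct platform_device *pdev)
{
	struct ce_hw_support support;
	int rc = 0;
	void *handle = qce_open(pdev, &rc);

	if (!handle)
		return rc ? rc : -ENODEV;

	if (qce_hw_support(handle, &support) < 0 || !support.ota) {
		qce_close(handle);
		return -ENODEV;  /* engine lacks the OTA (F8/F9) block */
	}

	/* handle can now be passed to the qce request functions, e.g. qce_f9_req(). */
	return 0;
}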
+ */ + +#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/qcrypto.h" +#include +#include +#include +#include +#include + +#include "qce.h" +#include "qce50.h" +#include "qcryptohw_50.h" +#include "qce_ota.h" + +#define CRYPTO_SMMU_IOVA_START 0x10000000 +#define CRYPTO_SMMU_IOVA_SIZE 0x40000000 + +#define CRYPTO_CONFIG_RESET 0xE01EF +#define MAX_SPS_DESC_FIFO_SIZE 0xfff0 +#define QCE_MAX_NUM_DSCR 0x200 +#define QCE_SECTOR_SIZE 0x200 +#define CE_CLK_100MHZ 100000000 +#define CE_CLK_DIV 1000000 + +#define CRYPTO_CORE_MAJOR_VER_NUM 0x05 +#define CRYPTO_CORE_MINOR_VER_NUM 0x03 +#define CRYPTO_CORE_STEP_VER_NUM 0x1 + +#define CRYPTO_REQ_USER_PAT 0xdead0000 + +static DEFINE_MUTEX(bam_register_lock); +static DEFINE_MUTEX(qce_iomap_mutex); + +struct bam_registration_info { + struct list_head qlist; + unsigned long handle; + uint32_t cnt; + uint32_t bam_mem; + void __iomem *bam_iobase; + bool support_cmd_dscr; +}; +static LIST_HEAD(qce50_bam_list); + +/* Used to determine the mode */ +#define MAX_BUNCH_MODE_REQ 2 +/* Max number of request supported */ +#define MAX_QCE_BAM_REQ 8 +/* Interrupt flag will be set for every SET_INTR_AT_REQ request */ +#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2) +/* To create extra request space to hold dummy request */ +#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1) +/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */ +#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ +/* QCE driver modes */ +#define IN_INTERRUPT_MODE 0 +#define IN_BUNCH_MODE 1 +/* Dummy request data length */ +#define DUMMY_REQ_DATA_LEN 64 +/* Delay timer to expire when in bunch mode */ +#define DELAY_IN_JIFFIES 5 +/* Index to point the dummy request */ +#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ + +#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec)) + +enum qce_owner { + QCE_OWNER_NONE = 0, + QCE_OWNER_CLIENT = 1, + QCE_OWNER_TIMEOUT = 2 +}; + +struct dummy_request { + struct qce_sha_req sreq; + struct scatterlist sg; + struct ahash_request areq; +}; + +/* + * CE HW device structure. + * Each engine has an instance of the structure. + * Each engine can only handle one crypto operation at one time. It is up to + * the sw above to ensure single threading of operation on an engine. 
+ */ +struct qce_device { + struct device *pdev; /* Handle to platform_device structure */ + struct bam_registration_info *pbam; + + unsigned char *coh_vmem; /* Allocated coherent virtual memory */ + dma_addr_t coh_pmem; /* Allocated coherent physical memory */ + int memsize; /* Memory allocated */ + unsigned char *iovec_vmem; /* Allocate iovec virtual memory */ + int iovec_memsize; /* Memory allocated */ + uint32_t bam_mem; /* bam physical address, from DT */ + uint32_t bam_mem_size; /* bam io size, from DT */ + int is_shared; /* CE HW is shared */ + bool support_cmd_dscr; + bool support_hw_key; + bool support_clk_mgmt_sus_res; + bool support_only_core_src_clk; + bool request_bw_before_clk; + + void __iomem *iobase; /* Virtual io base of CE HW */ + unsigned int phy_iobase; /* Physical io base of CE HW */ + + struct clk *ce_core_src_clk; /* Handle to CE src clk*/ + struct clk *ce_core_clk; /* Handle to CE clk */ + struct clk *ce_clk; /* Handle to CE clk */ + struct clk *ce_bus_clk; /* Handle to CE AXI clk*/ + bool no_get_around; + bool no_ccm_mac_status_get_around; + unsigned int ce_opp_freq_hz; + bool use_sw_aes_cbc_ecb_ctr_algo; + bool use_sw_aead_algo; + bool use_sw_aes_xts_algo; + bool use_sw_ahash_algo; + bool use_sw_hmac_algo; + bool use_sw_aes_ccm_algo; + uint32_t engines_avail; + struct qce_ce_cfg_reg_setting reg; + struct ce_bam_info ce_bam_info; + struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ]; + unsigned int ce_request_index; + enum qce_owner owner; + atomic_t no_of_queued_req; + struct timer_list timer; + struct dummy_request dummyreq; + unsigned int mode; + unsigned int intr_cadence; + unsigned int dev_no; + struct qce_driver_stats qce_stats; + atomic_t bunch_cmd_seq; + atomic_t last_intr_seq; + bool cadence_flag; + uint8_t *dummyreq_in_buf; + struct dma_iommu_mapping *smmu_mapping; + bool enable_s1_smmu; + bool no_clock_support; +}; + +static void print_notify_debug(struct sps_event_notify *notify); +static void _sps_producer_callback(struct sps_event_notify *notify); +static int qce_dummy_req(struct qce_device *pce_dev); + +static int _qce50_disp_stats; + +/* Standard initialization vector for SHA-1, source: FIPS 180-2 */ +static uint32_t _std_init_vector_sha1[] = { + 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 +}; + +/* Standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint32_t _std_init_vector_sha256[] = { + 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 +}; + +static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned int n; + + n = len / sizeof(uint32_t); + for (; n > 0; n--) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) | + (*(b+3) & 0xff); + b += sizeof(uint32_t); + iv++; + } + + n = len % sizeof(uint32_t); + if (n == 3) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00); + } else if (n == 2) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000); + } else if (n == 1) { + *iv = ((*b << 24) & 0xff000000); + } +} + +static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned int i, j; + unsigned char swap_iv[AES_IV_LENGTH]; + + memset(swap_iv, 0, AES_IV_LENGTH); + for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--) + swap_iv[i] = b[j]; + _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH); +} + +static int count_sg(struct scatterlist *sg, 
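A short worked example of the byte-to-word packing above, derived from the code shown: keys and IVs are packed big-endian into 32-bit words, and a trailing partial word is left-justified.

/*
 * Example: _byte_stream_to_net_words(iv, b, 5) with
 *     b = { 0x01, 0x02, 0x03, 0x04, 0x05 }
 * produces
 *     iv[0] = 0x01020304   (full word, big-endian packing)
 *     iv[1] = 0x05000000   (single leftover byte, shifted to the top)
 */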
int nbytes) +{ + int i; + + for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) + nbytes -= sg->length; + return i; +} + +static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + int i; + + for (i = 0; i < nents; ++i) { + dma_map_sg(dev, sg, 1, direction); + sg = sg_next(sg); + } + + return nents; +} + +static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction) +{ + int i; + + for (i = 0; i < nents; ++i) { + dma_unmap_sg(dev, sg, 1, direction); + sg = sg_next(sg); + } + + return nents; +} + +static int _probe_ce_engine(struct qce_device *pce_dev) +{ + unsigned int rev; + unsigned int maj_rev, min_rev, step_rev; + + rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG); + /* + * Ensure previous instructions (setting the GO register) + * was completed before checking the version. + */ + mb(); + maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV; + min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV; + step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV; + + if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) { + pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n", + pce_dev->phy_iobase, maj_rev, min_rev, step_rev); + return -EIO; + } + + /* + * The majority of crypto HW bugs have been fixed in 5.3.0 and + * above. That allows a single sps transfer of consumer + * pipe, and a single sps transfer of producer pipe + * for a crypto request. no_get_around flag indicates this. + * + * In 5.3.1, the CCM MAC_FAILED in result dump issue is + * fixed. no_ccm_mac_status_get_around flag indicates this. + */ + pce_dev->no_get_around = (min_rev >= + CRYPTO_CORE_MINOR_VER_NUM) ? true : false; + if (min_rev > CRYPTO_CORE_MINOR_VER_NUM) + pce_dev->no_ccm_mac_status_get_around = true; + else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) && + (step_rev >= CRYPTO_CORE_STEP_VER_NUM)) + pce_dev->no_ccm_mac_status_get_around = true; + else + pce_dev->no_ccm_mac_status_get_around = false; + + pce_dev->ce_bam_info.minor_version = min_rev; + + pce_dev->engines_avail = readl_relaxed(pce_dev->iobase + + CRYPTO_ENGINES_AVAIL); + dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n", + maj_rev, min_rev, step_rev, pce_dev->phy_iobase); + + pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE; + + dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n", + pce_dev->ce_bam_info.ce_device, pce_dev->iobase, + pce_dev->ce_bam_info.dest_pipe_index, + pce_dev->ce_bam_info.src_pipe_index, + pce_dev->ce_bam_info.bam_iobase, + pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail); + return 0; +}; + +static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo( + struct qce_device *pce_dev, + int req_info, struct qce_sha_req *sreq) +{ + struct ce_sps_data *pce_sps_data; + struct qce_cmdlistptr_ops *cmdlistptr; + + pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; + cmdlistptr = &pce_sps_data->cmdlistptr; + switch (sreq->alg) { + case QCE_HASH_SHA1: + return &cmdlistptr->auth_sha1; + case QCE_HASH_SHA256: + return &cmdlistptr->auth_sha256; + case QCE_HASH_SHA1_HMAC: + return &cmdlistptr->auth_sha1_hmac; + case QCE_HASH_SHA256_HMAC: + return &cmdlistptr->auth_sha256_hmac; + case QCE_HASH_AES_CMAC: + if (sreq->authklen == AES128_KEY_SIZE) + return &cmdlistptr->auth_aes_128_cmac; + return &cmdlistptr->auth_aes_256_cmac; + default: + 
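To make the version gating in _probe_ce_engine() concrete, the two workaround flags resolve as follows for a few core revisions (derived from the checks against CRYPTO_CORE_MINOR_VER_NUM = 3 and CRYPTO_CORE_STEP_VER_NUM = 1):

/*
 *   core rev   no_get_around   no_ccm_mac_status_get_around
 *   5.2.x      false           false
 *   5.3.0      true            false
 *   5.3.1      true            true
 *   5.4.x      true            true
 */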
return NULL; + } + return NULL; +} + +static int _ce_setup_hash(struct qce_device *pce_dev, + struct qce_sha_req *sreq, + struct qce_cmdlist_info *cmdlistinfo) +{ + uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)]; + uint32_t diglen; + int i; + uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + bool sha1 = false; + struct sps_command_element *pce = NULL; + bool use_hw_key = false; + bool use_pipe_key = false; + uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t); + uint32_t auth_cfg; + + if ((sreq->alg == QCE_HASH_SHA1_HMAC) || + (sreq->alg == QCE_HASH_SHA256_HMAC) || + (sreq->alg == QCE_HASH_AES_CMAC)) { + + + /* no more check for null key. use flag */ + if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) + == QCRYPTO_CTX_USE_HW_KEY) + use_hw_key = true; + else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == + QCRYPTO_CTX_USE_PIPE_KEY) + use_pipe_key = true; + pce = cmdlistinfo->go_proc; + if (use_hw_key) { + pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG + + pce_dev->phy_iobase); + } else { + pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + + pce_dev->phy_iobase); + pce = cmdlistinfo->auth_key; + if (!use_pipe_key) { + _byte_stream_to_net_words(mackey32, + sreq->authkey, + sreq->authklen); + for (i = 0; i < authk_size_in_word; i++, pce++) + pce->data = mackey32[i]; + } + } + } + + if (sreq->alg == QCE_HASH_AES_CMAC) + goto go_proc; + + /* if not the last, the size has to be on the block boundary */ + if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE)) + return -EIO; + + switch (sreq->alg) { + case QCE_HASH_SHA1: + case QCE_HASH_SHA1_HMAC: + diglen = SHA1_DIGEST_SIZE; + sha1 = true; + break; + case QCE_HASH_SHA256: + case QCE_HASH_SHA256_HMAC: + diglen = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } + + /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */ + if (sreq->first_blk) { + if (sha1) { + for (i = 0; i < 5; i++) + auth32[i] = _std_init_vector_sha1[i]; + } else { + for (i = 0; i < 8; i++) + auth32[i] = _std_init_vector_sha256[i]; + } + } else { + _byte_stream_to_net_words(auth32, sreq->digest, diglen); + } + + pce = cmdlistinfo->auth_iv; + for (i = 0; i < 5; i++, pce++) + pce->data = auth32[i]; + + if ((sreq->alg == QCE_HASH_SHA256) || + (sreq->alg == QCE_HASH_SHA256_HMAC)) { + for (i = 5; i < 8; i++, pce++) + pce->data = auth32[i]; + } + + /* write auth_bytecnt 0/1, start with 0 */ + pce = cmdlistinfo->auth_bytecount; + for (i = 0; i < 2; i++, pce++) + pce->data = sreq->auth_data[i]; + + /* Set/reset last bit in CFG register */ + pce = cmdlistinfo->auth_seg_cfg; + auth_cfg = pce->data & ~(1 << CRYPTO_LAST | + 1 << CRYPTO_FIRST | + 1 << CRYPTO_USE_PIPE_KEY_AUTH | + 1 << CRYPTO_USE_HW_KEY_AUTH); + if (sreq->last_blk) + auth_cfg |= 1 << CRYPTO_LAST; + if (sreq->first_blk) + auth_cfg |= 1 << CRYPTO_FIRST; + if (use_hw_key) + auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH; + if (use_pipe_key) + auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH; + pce->data = auth_cfg; +go_proc: + /* write auth seg size */ + pce = cmdlistinfo->auth_seg_size; + pce->data = sreq->size; + + pce = cmdlistinfo->encr_seg_cfg; + pce->data = 0; + + /* write auth seg size start*/ + pce = cmdlistinfo->auth_seg_start; + pce->data = 0; + + /* write seg size */ + pce = cmdlistinfo->seg_size; + + /* always ensure there is input data. 
ZLT does not work for bam-ndp */ + if (sreq->size) + pce->data = sreq->size; + else + pce->data = pce_dev->ce_bam_info.ce_burst_size; + + return 0; +} + +static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo( + struct qce_device *pce_dev, + int req_info, struct qce_req *creq) +{ + struct ce_sps_data *pce_sps_data; + struct qce_cmdlistptr_ops *cmdlistptr; + + pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; + cmdlistptr = &pce_sps_data->cmdlistptr; + switch (creq->alg) { + case CIPHER_ALG_DES: + switch (creq->mode) { + case QCE_MODE_CBC: + if (creq->auth_alg == QCE_HASH_SHA1_HMAC) + return &cmdlistptr->aead_hmac_sha1_cbc_des; + else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) + return &cmdlistptr->aead_hmac_sha256_cbc_des; + else + return NULL; + break; + default: + return NULL; + } + break; + case CIPHER_ALG_3DES: + switch (creq->mode) { + case QCE_MODE_CBC: + if (creq->auth_alg == QCE_HASH_SHA1_HMAC) + return &cmdlistptr->aead_hmac_sha1_cbc_3des; + else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) + return &cmdlistptr->aead_hmac_sha256_cbc_3des; + else + return NULL; + break; + default: + return NULL; + } + break; + case CIPHER_ALG_AES: + switch (creq->mode) { + case QCE_MODE_CBC: + if (creq->encklen == AES128_KEY_SIZE) { + if (creq->auth_alg == QCE_HASH_SHA1_HMAC) + return + &cmdlistptr->aead_hmac_sha1_cbc_aes_128; + else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) + return + &cmdlistptr->aead_hmac_sha256_cbc_aes_128; + else + return NULL; + } else if (creq->encklen == AES256_KEY_SIZE) { + if (creq->auth_alg == QCE_HASH_SHA1_HMAC) + return &cmdlistptr->aead_hmac_sha1_cbc_aes_256; + else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) + return + &cmdlistptr->aead_hmac_sha256_cbc_aes_256; + else + return NULL; + } else + return NULL; + break; + default: + return NULL; + } + break; + + default: + return NULL; + } + return NULL; +} + +static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req, + uint32_t totallen_in, uint32_t coffset, + struct qce_cmdlist_info *cmdlistinfo) +{ + int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t); + int i; + uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0}; + struct sps_command_element *pce; + uint32_t a_cfg; + uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0}; + uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0}; + uint32_t enck_size_in_word = 0; + uint32_t enciv_in_word; + uint32_t key_size; + uint32_t encr_cfg = 0; + uint32_t ivsize = q_req->ivsize; + + key_size = q_req->encklen; + enck_size_in_word = key_size/sizeof(uint32_t); + + switch (q_req->alg) { + case CIPHER_ALG_DES: + enciv_in_word = 2; + break; + case CIPHER_ALG_3DES: + enciv_in_word = 2; + break; + case CIPHER_ALG_AES: + if ((key_size != AES128_KEY_SIZE) && + (key_size != AES256_KEY_SIZE)) + return -EINVAL; + enciv_in_word = 4; + break; + default: + return -EINVAL; + } + + /* only support cbc mode */ + if (q_req->mode != QCE_MODE_CBC) + return -EINVAL; + + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + pce = cmdlistinfo->encr_cntr_iv; + for (i = 0; i < enciv_in_word; i++, pce++) + pce->data = enciv32[i]; + + /* + * write encr key + * do not use hw key or pipe key + */ + _byte_stream_to_net_words(enckey32, q_req->enckey, key_size); + pce = cmdlistinfo->encr_key; + for (i = 0; i < enck_size_in_word; i++, pce++) + pce->data = enckey32[i]; + + /* write encr seg cfg */ + pce = cmdlistinfo->encr_seg_cfg; + encr_cfg = pce->data; + if (q_req->dir == QCE_ENCRYPT) + encr_cfg |= (1 << CRYPTO_ENCODE); + else + encr_cfg &= ~(1 << 
CRYPTO_ENCODE); + pce->data = encr_cfg; + + /* we only support sha1-hmac and sha256-hmac at this point */ + _byte_stream_to_net_words(mackey32, q_req->authkey, + q_req->authklen); + pce = cmdlistinfo->auth_key; + for (i = 0; i < authk_size_in_word; i++, pce++) + pce->data = mackey32[i]; + pce = cmdlistinfo->auth_iv; + + if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) + for (i = 0; i < 5; i++, pce++) + pce->data = _std_init_vector_sha1[i]; + else + for (i = 0; i < 8; i++, pce++) + pce->data = _std_init_vector_sha256[i]; + + /* write auth_bytecnt 0/1, start with 0 */ + pce = cmdlistinfo->auth_bytecount; + for (i = 0; i < 2; i++, pce++) + pce->data = 0; + + pce = cmdlistinfo->auth_seg_cfg; + a_cfg = pce->data; + a_cfg &= ~(CRYPTO_AUTH_POS_MASK); + if (q_req->dir == QCE_ENCRYPT) + a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); + else + a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + pce->data = a_cfg; + + /* write auth seg size */ + pce = cmdlistinfo->auth_seg_size; + pce->data = totallen_in; + + /* write auth seg size start*/ + pce = cmdlistinfo->auth_seg_start; + pce->data = 0; + + /* write seg size */ + pce = cmdlistinfo->seg_size; + pce->data = totallen_in; + + /* write encr seg size */ + pce = cmdlistinfo->encr_seg_size; + pce->data = q_req->cryptlen; + + /* write encr seg start */ + pce = cmdlistinfo->encr_seg_start; + pce->data = (coffset & 0xffff); + + return 0; + +} + +static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo( + struct qce_device *pce_dev, + int req_info, struct qce_req *creq) +{ + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + struct qce_cmdlistptr_ops *cmdlistptr; + + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + cmdlistptr = &pce_sps_data->cmdlistptr; + if (creq->alg != CIPHER_ALG_AES) { + switch (creq->alg) { + case CIPHER_ALG_DES: + if (creq->mode == QCE_MODE_ECB) + return &cmdlistptr->cipher_des_ecb; + return &cmdlistptr->cipher_des_cbc; + case CIPHER_ALG_3DES: + if (creq->mode == QCE_MODE_ECB) + return &cmdlistptr->cipher_3des_ecb; + return &cmdlistptr->cipher_3des_cbc; + default: + return NULL; + } + } else { + switch (creq->mode) { + case QCE_MODE_ECB: + if (creq->encklen == AES128_KEY_SIZE) + return &cmdlistptr->cipher_aes_128_ecb; + return &cmdlistptr->cipher_aes_256_ecb; + case QCE_MODE_CBC: + case QCE_MODE_CTR: + if (creq->encklen == AES128_KEY_SIZE) + return &cmdlistptr->cipher_aes_128_cbc_ctr; + return &cmdlistptr->cipher_aes_256_cbc_ctr; + case QCE_MODE_XTS: + if (creq->encklen/2 == AES128_KEY_SIZE) + return &cmdlistptr->cipher_aes_128_xts; + return &cmdlistptr->cipher_aes_256_xts; + case QCE_MODE_CCM: + if (creq->encklen == AES128_KEY_SIZE) + return &cmdlistptr->aead_aes_128_ccm; + return &cmdlistptr->aead_aes_256_ccm; + default: + return NULL; + } + } + return NULL; +} + +static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, + uint32_t totallen_in, uint32_t coffset, + struct qce_cmdlist_info *cmdlistinfo) +{ + uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = { + 0, 0, 0, 0}; + uint32_t enck_size_in_word = 0; + uint32_t key_size; + bool use_hw_key = false; + bool use_pipe_key = false; + uint32_t encr_cfg = 0; + uint32_t ivsize = creq->ivsize; + int i; + struct sps_command_element *pce = NULL; + + if (creq->mode == QCE_MODE_XTS) + key_size = creq->encklen/2; + else + key_size = creq->encklen; + + pce = cmdlistinfo->go_proc; + if 
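_ce_setup_aead() above only accepts CBC mode with SHA1-HMAC or SHA256-HMAC, so a caller's request looks roughly like the sketch below. The callback and the buffers/lengths are placeholders, and the helper names are illustrative; qce_aead_req() is the entry point declared in qce.h.

#include <linux/string.h>
#include "qce.h"

/* Illustrative only: hypothetical completion callback. */
static void example_aead_done(void *areq, unsigned char *icv,
			      unsigned char *iv, int ret)
{
	/* consume the result here */
}

static int example_aead_encrypt(void *qce_handle, struct qce_req *q)
{
	memset(q, 0, sizeof(*q));
	q->op       = QCE_REQ_AEAD;
	q->qce_cb   = example_aead_done;
	q->alg      = CIPHER_ALG_AES;
	q->mode     = QCE_MODE_CBC;          /* only CBC is accepted here */
	q->dir      = QCE_ENCRYPT;
	q->auth_alg = QCE_HASH_SHA256_HMAC;
	/*
	 * enckey/encklen, authkey/authklen, iv/ivsize, cryptlen, authsize,
	 * assoc/assoclen and the source scatterlist must be filled in by the
	 * caller before submitting.
	 */
	return qce_aead_req(qce_handle, q);
}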
((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { + use_hw_key = true; + } else { + if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == + QCRYPTO_CTX_USE_PIPE_KEY) + use_pipe_key = true; + } + pce = cmdlistinfo->go_proc; + if (use_hw_key) + pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG + + pce_dev->phy_iobase); + else + pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + + pce_dev->phy_iobase); + if (!use_pipe_key && !use_hw_key) { + _byte_stream_to_net_words(enckey32, creq->enckey, key_size); + enck_size_in_word = key_size/sizeof(uint32_t); + } + + if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) { + uint32_t authklen32 = creq->encklen/sizeof(uint32_t); + uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t); + uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0}; + uint32_t auth_cfg = 0; + + /* write nonce */ + _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE); + pce = cmdlistinfo->auth_nonce_info; + for (i = 0; i < noncelen32; i++, pce++) + pce->data = nonce32[i]; + + if (creq->authklen == AES128_KEY_SIZE) + auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128; + else { + if (creq->authklen == AES256_KEY_SIZE) + auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256; + } + if (creq->dir == QCE_ENCRYPT) + auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + else + auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); + auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE); + + if (use_hw_key) { + auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH); + } else { + auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); + /* write auth key */ + pce = cmdlistinfo->auth_key; + for (i = 0; i < authklen32; i++, pce++) + pce->data = enckey32[i]; + } + + pce = cmdlistinfo->auth_seg_cfg; + pce->data = auth_cfg; + + pce = cmdlistinfo->auth_seg_size; + if (creq->dir == QCE_ENCRYPT) + pce->data = totallen_in; + else + pce->data = totallen_in - creq->authsize; + pce = cmdlistinfo->auth_seg_start; + pce->data = 0; + } else { + if (creq->op != QCE_REQ_AEAD) { + pce = cmdlistinfo->auth_seg_cfg; + pce->data = 0; + } + } + switch (creq->mode) { + case QCE_MODE_ECB: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256; + break; + case QCE_MODE_CBC: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256; + break; + case QCE_MODE_XTS: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256; + break; + case QCE_MODE_CCM: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256; + encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) | + (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); + break; + case QCE_MODE_CTR: + default: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256; + break; + } + + switch (creq->alg) { + case CIPHER_ALG_DES: + if (creq->mode != QCE_MODE_ECB) { + if (ivsize > MAX_IV_LENGTH) { + pr_err("%s: error: Invalid length parameter\n", + __func__); + return -EINVAL; + } + _byte_stream_to_net_words(enciv32, creq->iv, ivsize); + pce = cmdlistinfo->encr_cntr_iv; + pce->data = enciv32[0]; + pce++; + pce->data = enciv32[1]; + } + if (!use_hw_key) { + pce = cmdlistinfo->encr_key; + pce->data = enckey32[0]; + pce++; + pce->data = enckey32[1]; + } + break; + case CIPHER_ALG_3DES: + if (creq->mode != 
QCE_MODE_ECB) { + if (ivsize > MAX_IV_LENGTH) { + pr_err("%s: error: Invalid length parameter\n", + __func__); + return -EINVAL; + } + _byte_stream_to_net_words(enciv32, creq->iv, ivsize); + pce = cmdlistinfo->encr_cntr_iv; + pce->data = enciv32[0]; + pce++; + pce->data = enciv32[1]; + } + if (!use_hw_key) { + /* write encr key */ + pce = cmdlistinfo->encr_key; + for (i = 0; i < 6; i++, pce++) + pce->data = enckey32[i]; + } + break; + case CIPHER_ALG_AES: + default: + if (creq->mode == QCE_MODE_XTS) { + uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] + = {0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t xtsklen = + creq->encklen/(2 * sizeof(uint32_t)); + + if (!use_hw_key && !use_pipe_key) { + _byte_stream_to_net_words(xtskey32, + (creq->enckey + creq->encklen/2), + creq->encklen/2); + /* write xts encr key */ + pce = cmdlistinfo->encr_xts_key; + for (i = 0; i < xtsklen; i++, pce++) + pce->data = xtskey32[i]; + } + /* write xts du size */ + pce = cmdlistinfo->encr_xts_du_size; + switch (creq->flags & QCRYPTO_CTX_XTS_MASK) { + case QCRYPTO_CTX_XTS_DU_SIZE_512B: + pce->data = min((unsigned int)QCE_SECTOR_SIZE, + creq->cryptlen); + break; + case QCRYPTO_CTX_XTS_DU_SIZE_1KB: + pce->data = + min((unsigned int)QCE_SECTOR_SIZE * 2, + creq->cryptlen); + break; + default: + pce->data = creq->cryptlen; + break; + } + } + if (creq->mode != QCE_MODE_ECB) { + if (ivsize > MAX_IV_LENGTH) { + pr_err("%s: error: Invalid length parameter\n", + __func__); + return -EINVAL; + } + if (creq->mode == QCE_MODE_XTS) + _byte_stream_swap_to_net_words(enciv32, + creq->iv, ivsize); + else + _byte_stream_to_net_words(enciv32, creq->iv, + ivsize); + /* write encr cntr iv */ + pce = cmdlistinfo->encr_cntr_iv; + for (i = 0; i < 4; i++, pce++) + pce->data = enciv32[i]; + + if (creq->mode == QCE_MODE_CCM) { + /* write cntr iv for ccm */ + pce = cmdlistinfo->encr_ccm_cntr_iv; + for (i = 0; i < 4; i++, pce++) + pce->data = enciv32[i]; + /* update cntr_iv[3] by one */ + pce = cmdlistinfo->encr_cntr_iv; + pce += 3; + pce->data += 1; + } + } + + if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { + encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + } else { + if (!use_hw_key) { + /* write encr key */ + pce = cmdlistinfo->encr_key; + for (i = 0; i < enck_size_in_word; i++, pce++) + pce->data = enckey32[i]; + } + } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ + break; + } /* end of switch (creq->mode) */ + + if (use_pipe_key) + encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED + << CRYPTO_USE_PIPE_KEY_ENCR); + + /* write encr seg cfg */ + pce = cmdlistinfo->encr_seg_cfg; + if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) { + if (creq->dir == QCE_ENCRYPT) + pce->data |= (1 << CRYPTO_ENCODE); + else + pce->data &= ~(1 << CRYPTO_ENCODE); + encr_cfg = pce->data; + } else { + encr_cfg |= + ((creq->dir == QCE_ENCRYPT) ? 
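The XTS data-unit size programmed above is driven by the QCRYPTO_CTX_XTS_DU_SIZE_* flags from qcrypto.h: with the 512B flag the DU is capped at QCE_SECTOR_SIZE, with the 1KB flag at two sectors, and otherwise it defaults to the full cryptlen. Assuming the flag reaches creq->flags through qcrypto_cipher_set_flag() (that plumbing is outside this excerpt), a caller would request 512-byte data units like this:

#include <crypto/skcipher.h>
#include "linux/qcrypto.h"

/* Sketch: ask for 512-byte XTS data units on an skcipher request. */
static inline int example_use_512b_du(struct skcipher_request *req)
{
	return qcrypto_cipher_set_flag(req, QCRYPTO_CTX_XTS_DU_SIZE_512B);
}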
1 : 0) << CRYPTO_ENCODE; + } + if (use_hw_key) + encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); + else + encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); + pce->data = encr_cfg; + + /* write encr seg size */ + pce = cmdlistinfo->encr_seg_size; + if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) + pce->data = (creq->cryptlen + creq->authsize); + else + pce->data = creq->cryptlen; + + /* write encr seg start */ + pce = cmdlistinfo->encr_seg_start; + pce->data = (coffset & 0xffff); + + /* write seg size */ + pce = cmdlistinfo->seg_size; + pce->data = totallen_in; + + return 0; +} + +static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req, + struct qce_cmdlist_info *cmdlistinfo) +{ + uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)]; + uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); + uint32_t cfg; + struct sps_command_element *pce; + int i; + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + cfg = pce_dev->reg.auth_cfg_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + default: + cfg = pce_dev->reg.auth_cfg_snow3g; + break; + } + + /* write key in CRYPTO_AUTH_IV0-3_REG */ + _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE); + pce = cmdlistinfo->auth_iv; + for (i = 0; i < key_size_in_word; i++, pce++) + pce->data = ikey32[i]; + + /* write last bits in CRYPTO_AUTH_IV4_REG */ + pce->data = req->last_bits; + + /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */ + pce = cmdlistinfo->auth_bytecount; + pce->data = req->fresh; + + /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */ + pce++; + pce->data = req->count_i; + + /* write auth seg cfg */ + pce = cmdlistinfo->auth_seg_cfg; + if (req->direction == QCE_OTA_DIR_DOWNLINK) + cfg |= BIT(CRYPTO_F9_DIRECTION); + pce->data = cfg; + + /* write auth seg size */ + pce = cmdlistinfo->auth_seg_size; + pce->data = req->msize; + + /* write auth seg start*/ + pce = cmdlistinfo->auth_seg_start; + pce->data = 0; + + /* write seg size */ + pce = cmdlistinfo->seg_size; + pce->data = req->msize; + + + /* write go */ + pce = cmdlistinfo->go_proc; + pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase); + return 0; +} + +static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, + bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset, + uint16_t cipher_size, + struct qce_cmdlist_info *cmdlistinfo) +{ + uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)]; + uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); + uint32_t cfg; + struct sps_command_element *pce; + int i; + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + cfg = pce_dev->reg.encr_cfg_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + default: + cfg = pce_dev->reg.encr_cfg_snow3g; + break; + } + /* write key */ + _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE); + pce = cmdlistinfo->encr_key; + for (i = 0; i < key_size_in_word; i++, pce++) + pce->data = ckey32[i]; + + /* write encr seg cfg */ + pce = cmdlistinfo->encr_seg_cfg; + if (key_stream_mode) + cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE); + if (req->direction == QCE_OTA_DIR_DOWNLINK) + cfg |= BIT(CRYPTO_F8_DIRECTION); + pce->data = cfg; + + /* write encr seg start */ + pce = cmdlistinfo->encr_seg_start; + pce->data = (cipher_offset & 0xffff); + + /* write encr seg size */ + pce = cmdlistinfo->encr_seg_size; + pce->data = cipher_size; + + /* write seg size */ + pce = cmdlistinfo->seg_size; + pce->data = req->data_len; + + /* write cntr0_iv0 for countC */ + pce = cmdlistinfo->encr_cntr_iv; + pce->data = req->count_c; + /* write 
cntr1_iv1 for nPkts, and bearer */ + pce++; + if (npkts == 1) + npkts = 0; + pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER | + npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT; + + /* write go */ + pce = cmdlistinfo->go_proc; + pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase); + + return 0; +} + +static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) +{ + int i, j, ents; + struct ce_sps_data *pce_sps_data; + struct sps_iovec *iovec; + uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD; + + pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; + iovec = pce_sps_data->in_transfer.iovec; + pr_info("==============================================\n"); + pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); + pr_info("==============================================\n"); + for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) { + pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, + iovec->addr, iovec->size, iovec->flags); + if (iovec->flags & cmd_flags) { + struct sps_command_element *pced; + + pced = (struct sps_command_element *) + (GET_VIRT_ADDR(iovec->addr)); + ents = iovec->size/(sizeof(struct sps_command_element)); + for (j = 0; j < ents; j++) { + pr_info(" [%d] [0x%x] 0x%x\n", j, + pced->addr, pced->data); + pced++; + } + } + iovec++; + } + + pr_info("==============================================\n"); + pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); + pr_info("==============================================\n"); + iovec = pce_sps_data->out_transfer.iovec; + for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) { + pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, + iovec->addr, iovec->size, iovec->flags); + iovec++; + } +} + +#ifdef QCE_DEBUG + +static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) +{ + _qce_dump_descr_fifos(pce_dev, req_info); +} + +#define QCE_WRITE_REG(val, addr) \ +{ \ + pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \ + writel_relaxed(val, addr); \ +} + +#else + +static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) +{ +} + +#define QCE_WRITE_REG(val, addr) \ + writel_relaxed(val, addr) + +#endif + +static int _ce_setup_hash_direct(struct qce_device *pce_dev, + struct qce_sha_req *sreq) +{ + uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)]; + uint32_t diglen; + bool use_hw_key = false; + bool use_pipe_key = false; + int i; + uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t); + bool sha1 = false; + uint32_t auth_cfg = 0; + + /* clear status */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* + * Ensure previous instructions (setting the CONFIG register) + * was completed before issuing starting to set other config register + * This is to ensure the configurations are done in correct endian-ness + * as set in the CONFIG registers + */ + mb(); + + if (sreq->alg == QCE_HASH_AES_CMAC) { + /* write seg_cfg */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + /* write seg_cfg */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + /* write seg_cfg */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); + + /* Clear auth_ivn, auth_keyn registers */ + for (i = 0; i < 16; i++) { + QCE_WRITE_REG(0, (pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); + QCE_WRITE_REG(0, (pce_dev->iobase + + 
(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)))); + } + /* write auth_bytecnt 0/1/2/3, start with 0 */ + for (i = 0; i < 4; i++) + QCE_WRITE_REG(0, pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG + + i * sizeof(uint32_t)); + + if (sreq->authklen == AES128_KEY_SIZE) + auth_cfg = pce_dev->reg.auth_cfg_cmac_128; + else + auth_cfg = pce_dev->reg.auth_cfg_cmac_256; + } + + if ((sreq->alg == QCE_HASH_SHA1_HMAC) || + (sreq->alg == QCE_HASH_SHA256_HMAC) || + (sreq->alg == QCE_HASH_AES_CMAC)) { + + _byte_stream_to_net_words(mackey32, sreq->authkey, + sreq->authklen); + + /* no more check for null key. use flag to check*/ + + if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) == + QCRYPTO_CTX_USE_HW_KEY) { + use_hw_key = true; + } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == + QCRYPTO_CTX_USE_PIPE_KEY) { + use_pipe_key = true; + } else { + /* setup key */ + for (i = 0; i < authk_size_in_word; i++) + QCE_WRITE_REG(mackey32[i], (pce_dev->iobase + + (CRYPTO_AUTH_KEY0_REG + + i*sizeof(uint32_t)))); + } + } + + if (sreq->alg == QCE_HASH_AES_CMAC) + goto go_proc; + + /* if not the last, the size has to be on the block boundary */ + if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE)) + return -EIO; + + switch (sreq->alg) { + case QCE_HASH_SHA1: + auth_cfg = pce_dev->reg.auth_cfg_sha1; + diglen = SHA1_DIGEST_SIZE; + sha1 = true; + break; + case QCE_HASH_SHA1_HMAC: + auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1; + diglen = SHA1_DIGEST_SIZE; + sha1 = true; + break; + case QCE_HASH_SHA256: + auth_cfg = pce_dev->reg.auth_cfg_sha256; + diglen = SHA256_DIGEST_SIZE; + break; + case QCE_HASH_SHA256_HMAC: + auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256; + diglen = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } + + /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */ + if (sreq->first_blk) { + if (sha1) { + for (i = 0; i < 5; i++) + auth32[i] = _std_init_vector_sha1[i]; + } else { + for (i = 0; i < 8; i++) + auth32[i] = _std_init_vector_sha256[i]; + } + } else { + _byte_stream_to_net_words(auth32, sreq->digest, diglen); + } + + /* Set auth_ivn, auth_keyn registers */ + for (i = 0; i < 5; i++) + QCE_WRITE_REG(auth32[i], (pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); + + if ((sreq->alg == QCE_HASH_SHA256) || + (sreq->alg == QCE_HASH_SHA256_HMAC)) { + for (i = 5; i < 8; i++) + QCE_WRITE_REG(auth32[i], (pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); + } + + + /* write auth_bytecnt 0/1/2/3, start with 0 */ + for (i = 0; i < 2; i++) + QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG + + i * sizeof(uint32_t)); + + /* Set/reset last bit in CFG register */ + if (sreq->last_blk) + auth_cfg |= 1 << CRYPTO_LAST; + else + auth_cfg &= ~(1 << CRYPTO_LAST); + if (sreq->first_blk) + auth_cfg |= 1 << CRYPTO_FIRST; + else + auth_cfg &= ~(1 << CRYPTO_FIRST); + if (use_hw_key) + auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH; + if (use_pipe_key) + auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH; +go_proc: + /* write seg_cfg */ + QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + /* write auth seg_size */ + QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); + + /* write auth_seg_start */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); + + /* reset encr seg_cfg */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* write seg_size */ + QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* 
issue go to crypto */ + if (!use_hw_key) { + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), + pce_dev->iobase + CRYPTO_GOPROC_REG); + } else { + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)), + pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG); + } + /* + * Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +} + +static int _ce_setup_aead_direct(struct qce_device *pce_dev, + struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset) +{ + int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t); + int i; + uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0}; + uint32_t a_cfg; + uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0}; + uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0}; + uint32_t enck_size_in_word = 0; + uint32_t enciv_in_word; + uint32_t key_size; + uint32_t ivsize = q_req->ivsize; + uint32_t encr_cfg; + + + /* clear status */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* + * Ensure previous instructions (setting the CONFIG register) + * was completed before issuing starting to set other config register + * This is to ensure the configurations are done in correct endian-ness + * as set in the CONFIG registers + */ + mb(); + + key_size = q_req->encklen; + enck_size_in_word = key_size/sizeof(uint32_t); + + switch (q_req->alg) { + + case CIPHER_ALG_DES: + + switch (q_req->mode) { + case QCE_MODE_CBC: + encr_cfg = pce_dev->reg.encr_cfg_des_cbc; + break; + default: + return -EINVAL; + } + + enciv_in_word = 2; + break; + + case CIPHER_ALG_3DES: + + switch (q_req->mode) { + case QCE_MODE_CBC: + encr_cfg = pce_dev->reg.encr_cfg_3des_cbc; + break; + default: + return -EINVAL; + } + + enciv_in_word = 2; + + break; + + case CIPHER_ALG_AES: + + switch (q_req->mode) { + case QCE_MODE_CBC: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128; + else if (key_size == AES256_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + enciv_in_word = 4; + break; + + default: + return -EINVAL; + } + + + + + /* write CNTR0_IV0_REG */ + if (q_req->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + for (i = 0; i < enciv_in_word; i++) + QCE_WRITE_REG(enciv32[i], pce_dev->iobase + + (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t))); + } + + /* + * write encr key + * do not use hw key or pipe key + */ + _byte_stream_to_net_words(enckey32, q_req->enckey, key_size); + for (i = 0; i < enck_size_in_word; i++) + QCE_WRITE_REG(enckey32[i], pce_dev->iobase + + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))); + + /* write encr seg cfg */ + if (q_req->dir == QCE_ENCRYPT) + encr_cfg |= (1 << CRYPTO_ENCODE); + QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* we only support sha1-hmac and sha256-hmac at this point */ + _byte_stream_to_net_words(mackey32, q_req->authkey, + q_req->authklen); + for (i = 0; i < authk_size_in_word; i++) + QCE_WRITE_REG(mackey32[i], pce_dev->iobase + + (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))); + + if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) { + for (i = 0; i < 5; i++) + QCE_WRITE_REG(_std_init_vector_sha1[i], + pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))); + } else { + for (i = 0; i < 8; i++) + QCE_WRITE_REG(_std_init_vector_sha256[i], 
+ pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))); + } + + /* write auth_bytecnt 0/1, start with 0 */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG); + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG); + + /* write encr seg size */ + QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase + + CRYPTO_ENCR_SEG_SIZE_REG); + + /* write encr start */ + QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase + + CRYPTO_ENCR_SEG_START_REG); + + if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) + a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac; + else + a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac; + + if (q_req->dir == QCE_ENCRYPT) + a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); + else + a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + /* write auth seg_cfg */ + QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + /* write auth seg_size */ + QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); + + /* write auth_seg_start */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); + + + /* write seg_size */ + QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + + + CRYPTO_CONFIG_REG)); + /* issue go to crypto */ + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), + pce_dev->iobase + CRYPTO_GOPROC_REG); + /* + * Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +} + +static int _ce_setup_cipher_direct(struct qce_device *pce_dev, + struct qce_req *creq, uint32_t totallen_in, uint32_t coffset) +{ + uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = { + 0, 0, 0, 0}; + uint32_t enck_size_in_word = 0; + uint32_t key_size; + bool use_hw_key = false; + bool use_pipe_key = false; + uint32_t encr_cfg = 0; + uint32_t ivsize = creq->ivsize; + int i; + + /* clear status */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* + * Ensure previous instructions (setting the CONFIG register) + * was completed before issuing starting to set other config register + * This is to ensure the configurations are done in correct endian-ness + * as set in the CONFIG registers + */ + mb(); + + if (creq->mode == QCE_MODE_XTS) + key_size = creq->encklen/2; + else + key_size = creq->encklen; + + if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { + use_hw_key = true; + } else { + if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) == + QCRYPTO_CTX_USE_PIPE_KEY) + use_pipe_key = true; + } + if (!use_pipe_key && !use_hw_key) { + _byte_stream_to_net_words(enckey32, creq->enckey, key_size); + enck_size_in_word = key_size/sizeof(uint32_t); + } + if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) { + uint32_t authklen32 = creq->encklen/sizeof(uint32_t); + uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t); + uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0}; + uint32_t auth_cfg = 0; + + /* Clear auth_ivn, auth_keyn registers */ + for (i = 0; i < 16; i++) { + QCE_WRITE_REG(0, (pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); + QCE_WRITE_REG(0, (pce_dev->iobase + + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)))); + } + /* write auth_bytecnt 0/1/2/3, start with 0 */ + for (i = 0; i < 4; i++) 
+ QCE_WRITE_REG(0, pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG + + i * sizeof(uint32_t)); + /* write nonce */ + _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE); + for (i = 0; i < noncelen32; i++) + QCE_WRITE_REG(nonce32[i], pce_dev->iobase + + CRYPTO_AUTH_INFO_NONCE0_REG + + (i*sizeof(uint32_t))); + + if (creq->authklen == AES128_KEY_SIZE) + auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128; + else { + if (creq->authklen == AES256_KEY_SIZE) + auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256; + } + if (creq->dir == QCE_ENCRYPT) + auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + else + auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); + auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE); + + if (use_hw_key) { + auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH); + } else { + auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); + /* write auth key */ + for (i = 0; i < authklen32; i++) + QCE_WRITE_REG(enckey32[i], pce_dev->iobase + + CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t))); + } + QCE_WRITE_REG(auth_cfg, pce_dev->iobase + + CRYPTO_AUTH_SEG_CFG_REG); + if (creq->dir == QCE_ENCRYPT) { + QCE_WRITE_REG(totallen_in, pce_dev->iobase + + CRYPTO_AUTH_SEG_SIZE_REG); + } else { + QCE_WRITE_REG((totallen_in - creq->authsize), + pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); + } + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); + } else { + if (creq->op != QCE_REQ_AEAD) + QCE_WRITE_REG(0, pce_dev->iobase + + CRYPTO_AUTH_SEG_CFG_REG); + } + /* + * Ensure previous instructions (writes to all AUTH registers) + * were completed before accessing a register that is not in + * the same 1K range. + */ + mb(); + switch (creq->mode) { + case QCE_MODE_ECB: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256; + break; + case QCE_MODE_CBC: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256; + break; + case QCE_MODE_XTS: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256; + break; + case QCE_MODE_CCM: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256; + break; + case QCE_MODE_CTR: + default: + if (key_size == AES128_KEY_SIZE) + encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128; + else + encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256; + break; + } + + switch (creq->alg) { + case CIPHER_ALG_DES: + if (creq->mode != QCE_MODE_ECB) { + encr_cfg = pce_dev->reg.encr_cfg_des_cbc; + _byte_stream_to_net_words(enciv32, creq->iv, ivsize); + QCE_WRITE_REG(enciv32[0], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + QCE_WRITE_REG(enciv32[1], pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + } else { + encr_cfg = pce_dev->reg.encr_cfg_des_ecb; + } + if (!use_hw_key) { + QCE_WRITE_REG(enckey32[0], pce_dev->iobase + + CRYPTO_ENCR_KEY0_REG); + QCE_WRITE_REG(enckey32[1], pce_dev->iobase + + CRYPTO_ENCR_KEY1_REG); + } + break; + case CIPHER_ALG_3DES: + if (creq->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, creq->iv, ivsize); + QCE_WRITE_REG(enciv32[0], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + QCE_WRITE_REG(enciv32[1], pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + encr_cfg = pce_dev->reg.encr_cfg_3des_cbc; + } else { + encr_cfg = pce_dev->reg.encr_cfg_3des_ecb; + } + if (!use_hw_key) { + /* write all 6 words of the encr key */ + for (i = 0; i < 6; i++) + QCE_WRITE_REG(enckey32[i], 
(pce_dev->iobase + + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)))); + } + break; + case CIPHER_ALG_AES: + default: + if (creq->mode == QCE_MODE_XTS) { + uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] + = {0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t xtsklen = + creq->encklen/(2 * sizeof(uint32_t)); + + if (!use_hw_key && !use_pipe_key) { + _byte_stream_to_net_words(xtskey32, + (creq->enckey + creq->encklen/2), + creq->encklen/2); + /* write xts encr key */ + for (i = 0; i < xtsklen; i++) + QCE_WRITE_REG(xtskey32[i], + pce_dev->iobase + + CRYPTO_ENCR_XTS_KEY0_REG + + (i * sizeof(uint32_t))); + } + /* write xts du size */ + switch (creq->flags & QCRYPTO_CTX_XTS_MASK) { + case QCRYPTO_CTX_XTS_DU_SIZE_512B: + QCE_WRITE_REG( + min((uint32_t)QCE_SECTOR_SIZE, + creq->cryptlen), pce_dev->iobase + + CRYPTO_ENCR_XTS_DU_SIZE_REG); + break; + case QCRYPTO_CTX_XTS_DU_SIZE_1KB: + QCE_WRITE_REG( + min((uint32_t)(QCE_SECTOR_SIZE * 2), + creq->cryptlen), pce_dev->iobase + + CRYPTO_ENCR_XTS_DU_SIZE_REG); + break; + default: + QCE_WRITE_REG(creq->cryptlen, + pce_dev->iobase + + CRYPTO_ENCR_XTS_DU_SIZE_REG); + break; + } + } + if (creq->mode != QCE_MODE_ECB) { + if (creq->mode == QCE_MODE_XTS) + _byte_stream_swap_to_net_words(enciv32, + creq->iv, ivsize); + else + _byte_stream_to_net_words(enciv32, creq->iv, + ivsize); + + /* write encr cntr iv */ + for (i = 0; i <= 3; i++) + QCE_WRITE_REG(enciv32[i], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG + + (i * sizeof(uint32_t))); + + if (creq->mode == QCE_MODE_CCM) { + /* write cntr iv for ccm */ + for (i = 0; i <= 3; i++) + QCE_WRITE_REG(enciv32[i], + pce_dev->iobase + + CRYPTO_ENCR_CCM_INT_CNTR0_REG + + (i * sizeof(uint32_t))); + /* update cntr_iv[3] by one */ + QCE_WRITE_REG((enciv32[3] + 1), + pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG + + (3 * sizeof(uint32_t))); + } + } + + if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { + encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + } else { + if (!use_hw_key && !use_pipe_key) { + for (i = 0; i < enck_size_in_word; i++) + QCE_WRITE_REG(enckey32[i], + pce_dev->iobase + + CRYPTO_ENCR_KEY0_REG + + (i * sizeof(uint32_t))); + } + } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ + break; + } /* end of switch (creq->mode) */ + + if (use_pipe_key) + encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED + << CRYPTO_USE_PIPE_KEY_ENCR); + + /* write encr seg cfg */ + encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 
1 : 0) << CRYPTO_ENCODE; + if (use_hw_key) + encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); + else + encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); + /* write encr seg cfg */ + QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* write encr seg size */ + if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) { + QCE_WRITE_REG((creq->cryptlen + creq->authsize), + pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); + } else { + QCE_WRITE_REG(creq->cryptlen, + pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); + } + + /* write encr seg start */ + QCE_WRITE_REG((coffset & 0xffff), + pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG); + + /* write encr counter mask */ + QCE_WRITE_REG(0xffffffff, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG); + QCE_WRITE_REG(0xffffffff, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG0); + QCE_WRITE_REG(0xffffffff, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG1); + QCE_WRITE_REG(0xffffffff, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG2); + + /* write seg size */ + QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* issue go to crypto */ + if (!use_hw_key) { + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), + pce_dev->iobase + CRYPTO_GOPROC_REG); + } else { + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)), + pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG); + } + /* + * Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +} + +static int _ce_f9_setup_direct(struct qce_device *pce_dev, + struct qce_f9_req *req) +{ + uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)]; + uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); + uint32_t auth_cfg; + int i; + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + auth_cfg = pce_dev->reg.auth_cfg_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + default: + auth_cfg = pce_dev->reg.auth_cfg_snow3g; + break; + } + + /* clear status */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + + /* set big endian configuration */ + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* + * Ensure previous instructions (setting the CONFIG register) + * was completed before issuing starting to set other config register + * This is to ensure the configurations are done in correct endian-ness + * as set in the CONFIG registers + */ + mb(); + + /* write enc_seg_cfg */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* write ecn_seg_size */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); + + /* write key in CRYPTO_AUTH_IV0-3_REG */ + _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE); + for (i = 0; i < key_size_in_word; i++) + QCE_WRITE_REG(ikey32[i], (pce_dev->iobase + + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)))); + + /* write last bits in CRYPTO_AUTH_IV4_REG */ + QCE_WRITE_REG(req->last_bits, (pce_dev->iobase + + CRYPTO_AUTH_IV4_REG)); + + /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */ + QCE_WRITE_REG(req->fresh, (pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG)); + + /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */ + QCE_WRITE_REG(req->count_i, (pce_dev->iobase + + CRYPTO_AUTH_BYTECNT1_REG)); + + /* write auth seg cfg */ + if (req->direction == QCE_OTA_DIR_DOWNLINK) + auth_cfg |= BIT(CRYPTO_F9_DIRECTION); + QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + /* write auth 
seg size */ + QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); + + /* write auth seg start*/ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG); + + /* write seg size */ + QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + /* set little endian configuration before go*/ + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* write go */ + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), + pce_dev->iobase + CRYPTO_GOPROC_REG); + /* + * Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +} + +static int _ce_f8_setup_direct(struct qce_device *pce_dev, + struct qce_f8_req *req, bool key_stream_mode, + uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size) +{ + int i = 0; + uint32_t encr_cfg = 0; + uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)]; + uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t); + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + encr_cfg = pce_dev->reg.encr_cfg_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + default: + encr_cfg = pce_dev->reg.encr_cfg_snow3g; + break; + } + /* clear status */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + /* set big endian configuration */ + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* write auth seg configuration */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + /* write auth seg size */ + QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG); + + /* write key */ + _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE); + + for (i = 0; i < key_size_in_word; i++) + QCE_WRITE_REG(ckey32[i], (pce_dev->iobase + + (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t)))); + /* write encr seg cfg */ + if (key_stream_mode) + encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE); + if (req->direction == QCE_OTA_DIR_DOWNLINK) + encr_cfg |= BIT(CRYPTO_F8_DIRECTION); + QCE_WRITE_REG(encr_cfg, pce_dev->iobase + + CRYPTO_ENCR_SEG_CFG_REG); + + /* write encr seg start */ + QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase + + CRYPTO_ENCR_SEG_START_REG); + /* write encr seg size */ + QCE_WRITE_REG(cipher_size, pce_dev->iobase + + CRYPTO_ENCR_SEG_SIZE_REG); + + /* write seg size */ + QCE_WRITE_REG(req->data_len, pce_dev->iobase + + CRYPTO_SEG_SIZE_REG); + + /* write cntr0_iv0 for countC */ + QCE_WRITE_REG(req->count_c, pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + /* write cntr1_iv1 for nPkts, and bearer */ + if (npkts == 1) + npkts = 0; + QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER | + npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT, + pce_dev->iobase + CRYPTO_CNTR1_IV1_REG); + + /* set little endian configuration before go*/ + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + + CRYPTO_CONFIG_REG)); + /* write go */ + QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), + pce_dev->iobase + CRYPTO_GOPROC_REG); + /* + * Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +} + + +static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info) +{ + int rc = 0; + struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info + [req_info].ce_sps; + + if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr) + return rc; + + rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe, + GET_PHYS_ADDR( + 
pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist), + 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK)); + if (rc) { + pr_err("sps_xfr_one() fail rc=%d\n", rc); + rc = -EINVAL; + } + return rc; +} + +static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, + bool is_complete); + +static int _aead_complete(struct qce_device *pce_dev, int req_info) +{ + struct aead_request *areq; + unsigned char mac[SHA256_DIGEST_SIZE]; + uint32_t ccm_fail_status = 0; + uint32_t result_dump_status = 0; + int32_t result_status = 0; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + qce_comp_func_ptr_t qce_callback; + + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + qce_callback = preq_info->qce_cb; + areq = (struct aead_request *) preq_info->areq; + if (areq->src != areq->dst) { + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + DMA_FROM_DEVICE); + } + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + + if (preq_info->asg) + qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg, + preq_info->assoc_nents, DMA_TO_DEVICE); + /* check MAC */ + memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]), + SHA256_DIGEST_SIZE); + + /* read status before unlock */ + if (preq_info->dir == QCE_DECRYPT) { + if (pce_dev->no_get_around) + if (pce_dev->no_ccm_mac_status_get_around) + ccm_fail_status = + be32_to_cpu(pce_sps_data->result->status); + else + ccm_fail_status = + be32_to_cpu(pce_sps_data->result_null->status); + else + ccm_fail_status = readl_relaxed(pce_dev->iobase + + CRYPTO_STATUS_REG); + } + if (_qce_unlock_other_pipes(pce_dev, req_info)) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, mac, NULL, -ENXIO); + return -ENXIO; + } + result_dump_status = be32_to_cpu(pce_sps_data->result->status); + pce_sps_data->result->status = 0; + + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR))) { + pr_err("aead operation error. Status %x\n", result_dump_status); + result_status = -ENXIO; + } else if (pce_sps_data->consumer_status | + pce_sps_data->producer_status) { + pr_err("aead sps operation error. sps status %x %x\n", + pce_sps_data->consumer_status, + pce_sps_data->producer_status); + result_status = -ENXIO; + } + + if (preq_info->mode == QCE_MODE_CCM) { + /* + * Not from result dump, instead, use the status we just + * read of device for MAC_FAILED. 
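+ * (The MAC_FAILED check for CCM decryption below therefore uses + * ccm_fail_status, which was captured above before the other pipes were + * unlocked, rather than result_dump_status, which is read afterwards.)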
+ */ + if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) && + (ccm_fail_status & (1 << CRYPTO_MAC_FAILED))) + result_status = -EBADMSG; + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, mac, NULL, result_status); + + } else { + uint32_t ivsize = 0; + struct crypto_aead *aead; + unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE]; + + aead = crypto_aead_reqtfm(areq); + ivsize = crypto_aead_ivsize(aead); + memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv), + sizeof(iv)); + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, mac, iv, result_status); + + } + return 0; +} + +static int _sha_complete(struct qce_device *pce_dev, int req_info) +{ + struct ahash_request *areq; + unsigned char digest[SHA256_DIGEST_SIZE]; + uint32_t bytecount32[2]; + int32_t result_status = 0; + uint32_t result_dump_status; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + qce_comp_func_ptr_t qce_callback; + + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + qce_callback = preq_info->qce_cb; + areq = (struct ahash_request *) preq_info->areq; + if (!areq) { + pr_err("sha operation error. areq is NULL\n"); + return -ENXIO; + } + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + DMA_TO_DEVICE); + memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]), + SHA256_DIGEST_SIZE); + _byte_stream_to_net_words(bytecount32, + (unsigned char *)pce_sps_data->result->auth_byte_count, + 2 * CRYPTO_REG_SIZE); + + if (_qce_unlock_other_pipes(pce_dev, req_info)) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, digest, (char *)bytecount32, + -ENXIO); + return -ENXIO; + } + + result_dump_status = be32_to_cpu(pce_sps_data->result->status); + pce_sps_data->result->status = 0; + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR))) { + + pr_err("sha operation error. Status %x\n", result_dump_status); + result_status = -ENXIO; + } else if (pce_sps_data->consumer_status) { + pr_err("sha sps operation error. sps status %x\n", + pce_sps_data->consumer_status); + result_status = -ENXIO; + } + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, digest, (char *)bytecount32, result_status); + return 0; +} + +static int _f9_complete(struct qce_device *pce_dev, int req_info) +{ + uint32_t mac_i; + int32_t result_status = 0; + uint32_t result_dump_status; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + qce_comp_func_ptr_t qce_callback; + void *areq; + + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + qce_callback = preq_info->qce_cb; + areq = preq_info->areq; + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + preq_info->ota_size, DMA_TO_DEVICE); + _byte_stream_to_net_words(&mac_i, + (char *)(&pce_sps_data->result->auth_iv[0]), + CRYPTO_REG_SIZE); + + if (_qce_unlock_other_pipes(pce_dev, req_info)) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, -ENXIO); + return -ENXIO; + } + + result_dump_status = be32_to_cpu(pce_sps_data->result->status); + pce_sps_data->result->status = 0; + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR))) { + pr_err("f9 operation error. Status %x\n", result_dump_status); + result_status = -ENXIO; + } else if (pce_sps_data->consumer_status | + pce_sps_data->producer_status) { + pr_err("f9 sps operation error. 
sps status %x %x\n", + pce_sps_data->consumer_status, + pce_sps_data->producer_status); + result_status = -ENXIO; + } + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, (char *)&mac_i, NULL, result_status); + + return 0; +} + +static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) +{ + struct skcipher_request *areq; + unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE]; + int32_t result_status = 0; + uint32_t result_dump_status; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + qce_comp_func_ptr_t qce_callback; + + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + qce_callback = preq_info->qce_cb; + areq = (struct skcipher_request *) preq_info->areq; + if (areq->src != areq->dst) { + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, + preq_info->dst_nents, DMA_FROM_DEVICE); + } + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + + if (_qce_unlock_other_pipes(pce_dev, req_info)) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, -ENXIO); + return -ENXIO; + } + result_dump_status = be32_to_cpu(pce_sps_data->result->status); + pce_sps_data->result->status = 0; + + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR))) { + pr_err("ablk_cipher operation error. Status %x\n", + result_dump_status); + result_status = -ENXIO; + } else if (pce_sps_data->consumer_status | + pce_sps_data->producer_status) { + pr_err("ablk_cipher sps operation error. sps status %x %x\n", + pce_sps_data->consumer_status, + pce_sps_data->producer_status); + result_status = -ENXIO; + } + + if (preq_info->mode == QCE_MODE_ECB) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status | + result_status); + } else { + if (pce_dev->ce_bam_info.minor_version == 0) { + if (preq_info->mode == QCE_MODE_CBC) { + if (preq_info->dir == QCE_DECRYPT) + memcpy(iv, (char *)preq_info->dec_iv, + sizeof(iv)); + else + memcpy(iv, (unsigned char *) + (sg_virt(areq->src) + + areq->src->length - 16), + sizeof(iv)); + } + if ((preq_info->mode == QCE_MODE_CTR) || + (preq_info->mode == QCE_MODE_XTS)) { + uint32_t num_blk = 0; + uint32_t cntr_iv3 = 0; + unsigned long long cntr_iv64 = 0; + unsigned char *b = (unsigned char *)(&cntr_iv3); + + memcpy(iv, areq->iv, sizeof(iv)); + if (preq_info->mode != QCE_MODE_XTS) + num_blk = areq->cryptlen/16; + else + num_blk = 1; + cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) | + (((*(iv + 13)) << 16) & 0xff0000) | + (((*(iv + 14)) << 8) & 0xff00) | + (*(iv + 15) & 0xff); + cntr_iv64 = + (((unsigned long long)cntr_iv3 & + 0xFFFFFFFFULL) + + (unsigned long long)num_blk) % + (unsigned long long)(0x100000000ULL); + + cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF); + *(iv + 15) = (char)(*b); + *(iv + 14) = (char)(*(b + 1)); + *(iv + 13) = (char)(*(b + 2)); + *(iv + 12) = (char)(*(b + 3)); + } + } else { + memcpy(iv, + (char *)(pce_sps_data->result->encr_cntr_iv), + sizeof(iv)); + } + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, iv, result_status); + } + return 0; +} + +static int _f8_complete(struct qce_device *pce_dev, int req_info) +{ + int32_t result_status = 0; + uint32_t result_dump_status; + uint32_t result_dump_status2; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + qce_comp_func_ptr_t qce_callback; + void *areq; + + preq_info = 
&pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + qce_callback = preq_info->qce_cb; + areq = preq_info->areq; + if (preq_info->phy_ota_dst) + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, + preq_info->ota_size, DMA_FROM_DEVICE); + if (preq_info->phy_ota_src) + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + preq_info->ota_size, (preq_info->phy_ota_dst) ? + DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + + if (_qce_unlock_other_pipes(pce_dev, req_info)) { + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, -ENXIO); + return -ENXIO; + } + result_dump_status = be32_to_cpu(pce_sps_data->result->status); + result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2); + + if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR)))) { + pr_err( + "f8 oper error. Dump Sta %x Sta2 %x req %d\n", + result_dump_status, result_dump_status2, req_info); + result_status = -ENXIO; + } else if (pce_sps_data->consumer_status | + pce_sps_data->producer_status) { + pr_err("f8 sps operation error. sps status %x %x\n", + pce_sps_data->consumer_status, + pce_sps_data->producer_status); + result_status = -ENXIO; + } + pce_sps_data->result->status = 0; + pce_sps_data->result->status2 = 0; + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, result_status); + return 0; +} + +static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info) +{ + struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info] + .ce_sps; + pce_sps_data->in_transfer.iovec_count = 0; + pce_sps_data->out_transfer.iovec_count = 0; +} + +static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag) +{ + struct sps_iovec *iovec; + + if (sps_bam_pipe->iovec_count == 0) + return; + iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1); + iovec->flags |= flag; +} + +static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len, + struct sps_transfer *sps_bam_pipe) +{ + struct sps_iovec *iovec = sps_bam_pipe->iovec + + sps_bam_pipe->iovec_count; + uint32_t data_cnt; + + while (len > 0) { + if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { + pr_err("Num of descrptor %d exceed max (%d)\n", + sps_bam_pipe->iovec_count, + (uint32_t)QCE_MAX_NUM_DSCR); + return -ENOMEM; + } + if (len > SPS_MAX_PKT_SIZE) + data_cnt = SPS_MAX_PKT_SIZE; + else + data_cnt = len; + iovec->size = data_cnt; + iovec->addr = SPS_GET_LOWER_ADDR(paddr); + iovec->flags = SPS_GET_UPPER_ADDR(paddr); + sps_bam_pipe->iovec_count++; + iovec++; + paddr += data_cnt; + len -= data_cnt; + } + return 0; +} + +static int _qce_sps_add_sg_data(struct qce_device *pce_dev, + struct scatterlist *sg_src, uint32_t nbytes, + struct sps_transfer *sps_bam_pipe) +{ + uint32_t data_cnt, len; + dma_addr_t addr; + struct sps_iovec *iovec = sps_bam_pipe->iovec + + sps_bam_pipe->iovec_count; + + while (nbytes > 0 && sg_src) { + len = min(nbytes, sg_dma_len(sg_src)); + nbytes -= len; + addr = sg_dma_address(sg_src); + if (pce_dev->ce_bam_info.minor_version == 0) + len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size); + while (len > 0) { + if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { + pr_err("Num of descrptor %d exceed max (%d)\n", + sps_bam_pipe->iovec_count, + (uint32_t)QCE_MAX_NUM_DSCR); + return -ENOMEM; + } + if (len > SPS_MAX_PKT_SIZE) { + data_cnt = SPS_MAX_PKT_SIZE; + iovec->size = data_cnt; + iovec->addr = SPS_GET_LOWER_ADDR(addr); + iovec->flags = SPS_GET_UPPER_ADDR(addr); + } else { + data_cnt = len; + iovec->size 
= data_cnt; + iovec->addr = SPS_GET_LOWER_ADDR(addr); + iovec->flags = SPS_GET_UPPER_ADDR(addr); + } + iovec++; + sps_bam_pipe->iovec_count++; + addr += data_cnt; + len -= data_cnt; + } + sg_src = sg_next(sg_src); + } + return 0; +} + +static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev, + struct scatterlist *sg_src, uint32_t nbytes, uint32_t off, + struct sps_transfer *sps_bam_pipe) +{ + uint32_t data_cnt, len; + dma_addr_t addr; + struct sps_iovec *iovec = sps_bam_pipe->iovec + + sps_bam_pipe->iovec_count; + unsigned int res_within_sg; + + if (!sg_src) + return -ENOENT; + res_within_sg = sg_dma_len(sg_src); + + while (off > 0) { + if (!sg_src) { + pr_err("broken sg list off %d nbytes %d\n", + off, nbytes); + return -ENOENT; + } + len = sg_dma_len(sg_src); + if (off < len) { + res_within_sg = len - off; + break; + } + off -= len; + sg_src = sg_next(sg_src); + if (sg_src) + res_within_sg = sg_dma_len(sg_src); + } + while (nbytes > 0 && sg_src) { + len = min(nbytes, res_within_sg); + nbytes -= len; + addr = sg_dma_address(sg_src) + off; + if (pce_dev->ce_bam_info.minor_version == 0) + len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size); + while (len > 0) { + if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { + pr_err("Num of descrptor %d exceed max (%d)\n", + sps_bam_pipe->iovec_count, + (uint32_t)QCE_MAX_NUM_DSCR); + return -ENOMEM; + } + if (len > SPS_MAX_PKT_SIZE) { + data_cnt = SPS_MAX_PKT_SIZE; + iovec->size = data_cnt; + iovec->addr = SPS_GET_LOWER_ADDR(addr); + iovec->flags = SPS_GET_UPPER_ADDR(addr); + } else { + data_cnt = len; + iovec->size = data_cnt; + iovec->addr = SPS_GET_LOWER_ADDR(addr); + iovec->flags = SPS_GET_UPPER_ADDR(addr); + } + iovec++; + sps_bam_pipe->iovec_count++; + addr += data_cnt; + len -= data_cnt; + } + if (nbytes) { + sg_src = sg_next(sg_src); + if (!sg_src) { + pr_err("more data bytes %d\n", nbytes); + return -ENOMEM; + } + res_within_sg = sg_dma_len(sg_src); + off = 0; + } + } + return 0; +} + +static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag, + struct qce_cmdlist_info *cmdptr, + struct sps_transfer *sps_bam_pipe) +{ + dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist); + struct sps_iovec *iovec = sps_bam_pipe->iovec + + sps_bam_pipe->iovec_count; + iovec->size = cmdptr->size; + iovec->addr = SPS_GET_LOWER_ADDR(paddr); + iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag; + sps_bam_pipe->iovec_count++; + if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) { + pr_err("Num of descrptor %d exceed max (%d)\n", + sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR); + return -ENOMEM; + } + return 0; +} + +static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info) +{ + int rc = 0; + struct ce_sps_data *pce_sps_data; + + pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; + pce_sps_data->out_transfer.user = + (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT | + (unsigned int) req_info)); + pce_sps_data->in_transfer.user = + (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT | + (unsigned int) req_info)); + _qce_dump_descr_fifos_dbg(pce_dev, req_info); + + if (pce_sps_data->in_transfer.iovec_count) { + rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe, + &pce_sps_data->in_transfer); + if (rc) { + pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n", + (uintptr_t)pce_dev->ce_bam_info.consumer.pipe, + rc); + goto ret; + } + } + rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, + &pce_sps_data->out_transfer); + if (rc) + pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", + 
(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc); +ret: + if (rc) + _qce_dump_descr_fifos(pce_dev, req_info); + return rc; +} + +/** + * Allocate and connect a CE peripheral's SPS endpoint + * + * This function allocates an endpoint context and + * connects it to a memory endpoint by calling the + * appropriate SPS driver APIs. + * + * It also registers an SPS callback function with the + * SPS driver. + * + * This function should typically be called only once, + * during driver probe. + * + * @pce_dev - Pointer to qce_device structure + * @ep - Pointer to sps endpoint data structure + * @is_producer - 1 means Producer endpoint + * 0 means Consumer endpoint + * + * @return - 0 if successful else negative value. + * + */ +static int qce_sps_init_ep_conn(struct qce_device *pce_dev, + struct qce_sps_ep_conn_data *ep, + bool is_producer) +{ + int rc = 0; + struct sps_pipe *sps_pipe_info; + struct sps_connect *sps_connect_info = &ep->connect; + struct sps_register_event *sps_event = &ep->event; + + /* Allocate endpoint context */ + sps_pipe_info = sps_alloc_endpoint(); + if (!sps_pipe_info) { + pr_err("sps_alloc_endpoint() failed!!! is_producer=%d\n", + is_producer); + rc = -ENOMEM; + goto out; + } + /* Now save the sps pipe handle */ + ep->pipe = sps_pipe_info; + + /* Get default connection configuration for an endpoint */ + rc = sps_get_config(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto get_config_err; + } + + /* Modify the default connection configuration */ + if (is_producer) { + /* + * For a CE producer transfer, the source should be the + * CE peripheral, whereas the destination should + * be system memory. + */ + sps_connect_info->source = pce_dev->ce_bam_info.bam_handle; + sps_connect_info->destination = SPS_DEV_HANDLE_MEM; + /* Producer pipe will handle this connection */ + sps_connect_info->mode = SPS_MODE_SRC; + sps_connect_info->options = + SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE; + } else { + /* For a CE consumer transfer, the source should be + * system memory, whereas the destination should be the + * CE peripheral. + */ + sps_connect_info->source = SPS_DEV_HANDLE_MEM; + sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle; + sps_connect_info->mode = SPS_MODE_DEST; + sps_connect_info->options = + SPS_O_AUTO_ENABLE; + } + + /* Producer pipe index */ + sps_connect_info->src_pipe_index = + pce_dev->ce_bam_info.src_pipe_index; + /* Consumer pipe index */ + sps_connect_info->dest_pipe_index = + pce_dev->ce_bam_info.dest_pipe_index; + /* Set pipe group */ + sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index; + sps_connect_info->event_thresh = 0x10; + /* + * Max. no. of scatter/gather buffers that can + * be passed by the block layer = 32 (NR_SG). + * Each BAM descriptor needs 64 bits (8 bytes). + * One BAM descriptor is required per buffer transfer. + * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO. + * But due to a HW limitation we need to allocate at least one extra + * descriptor's worth of memory (256 bytes + 8 bytes). To keep the size + * a power of 2, we allocate 512 bytes of memory. 
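+ * In other words: 32 buffers * 8 bytes = 256 bytes of descriptor FIFO, + * plus one extra 8-byte descriptor required by the HW = 264 bytes, + * rounded up to the next power of two = 512 bytes. Note that the + * allocation below is actually sized for the maximum number of queued + * requests (QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ * + * sizeof(struct sps_iovec)) and is capped at MAX_SPS_DESC_FIFO_SIZE.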
+ */ + sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ * + sizeof(struct sps_iovec); + if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE) + sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE; + sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev, + sps_connect_info->desc.size, + &sps_connect_info->desc.phys_base, + GFP_KERNEL | __GFP_ZERO); + if (sps_connect_info->desc.base == NULL) { + rc = -ENOMEM; + pr_err("Can not allocate coherent memory for sps data\n"); + goto get_config_err; + } + + /* Establish connection between peripheral and memory endpoint */ + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto sps_connect_err; + } + + sps_event->mode = SPS_TRIGGER_CALLBACK; + sps_event->xfer_done = NULL; + sps_event->user = (void *)pce_dev; + if (is_producer) { + sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE; + sps_event->callback = _sps_producer_callback; + rc = sps_register_event(ep->pipe, sps_event); + if (rc) { + pr_err("Producer callback registration failed rc=%d\n", + rc); + goto sps_connect_err; + } + } else { + sps_event->options = SPS_O_EOT; + sps_event->callback = NULL; + } + + pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n", + is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)", + (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base); + goto out; + +sps_connect_err: + dma_free_coherent(pce_dev->pdev, + sps_connect_info->desc.size, + sps_connect_info->desc.base, + sps_connect_info->desc.phys_base); +get_config_err: + sps_free_endpoint(sps_pipe_info); +out: + return rc; +} + +/** + * Disconnect and Deallocate a CE peripheral's SPS endpoint + * + * This function disconnect endpoint and deallocates + * endpoint context. + * + * This function should only be called once typically + * during driver remove. 
+ * + * @pce_dev - Pointer to qce_device structure + * @ep - Pointer to sps endpoint data structure + * + */ +static void qce_sps_exit_ep_conn(struct qce_device *pce_dev, + struct qce_sps_ep_conn_data *ep) +{ + struct sps_pipe *sps_pipe_info = ep->pipe; + struct sps_connect *sps_connect_info = &ep->connect; + + sps_disconnect(sps_pipe_info); + dma_free_coherent(pce_dev->pdev, + sps_connect_info->desc.size, + sps_connect_info->desc.base, + sps_connect_info->desc.phys_base); + sps_free_endpoint(sps_pipe_info); +} + +static void qce_sps_release_bam(struct qce_device *pce_dev) +{ + struct bam_registration_info *pbam; + + mutex_lock(&bam_register_lock); + pbam = pce_dev->pbam; + if (pbam == NULL) + goto ret; + + pbam->cnt--; + if (pbam->cnt > 0) + goto ret; + + if (pce_dev->ce_bam_info.bam_handle) { + sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle); + + pr_debug("deregister bam handle 0x%lx\n", + pce_dev->ce_bam_info.bam_handle); + pce_dev->ce_bam_info.bam_handle = 0; + } + iounmap(pbam->bam_iobase); + pr_debug("delete bam 0x%x\n", pbam->bam_mem); + list_del(&pbam->qlist); + kfree(pbam); + +ret: + pce_dev->pbam = NULL; + mutex_unlock(&bam_register_lock); +} + +static int qce_sps_get_bam(struct qce_device *pce_dev) +{ + int rc = 0; + struct sps_bam_props bam = {0}; + struct bam_registration_info *pbam = NULL; + struct bam_registration_info *p; + uint32_t bam_cfg = 0; + + + mutex_lock(&bam_register_lock); + + list_for_each_entry(p, &qce50_bam_list, qlist) { + if (p->bam_mem == pce_dev->bam_mem) { + pbam = p; /* found */ + break; + } + } + + if (pbam) { + pr_debug("found bam 0x%x\n", pbam->bam_mem); + pbam->cnt++; + pce_dev->ce_bam_info.bam_handle = pbam->handle; + pce_dev->ce_bam_info.bam_mem = pbam->bam_mem; + pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase; + pce_dev->pbam = pbam; + pce_dev->support_cmd_dscr = pbam->support_cmd_dscr; + goto ret; + } + + pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL); + if (!pbam) { + rc = -ENOMEM; + goto ret; + } + pbam->cnt = 1; + pbam->bam_mem = pce_dev->bam_mem; + pbam->bam_iobase = ioremap(pce_dev->bam_mem, + pce_dev->bam_mem_size); + if (!pbam->bam_iobase) { + kfree(pbam); + rc = -ENOMEM; + pr_err("Can not map BAM io memory\n"); + goto ret; + } + pce_dev->ce_bam_info.bam_mem = pbam->bam_mem; + pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase; + pbam->handle = 0; + pr_debug("allocate bam 0x%x\n", pbam->bam_mem); + bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase + + CRYPTO_BAM_CNFG_BITS_REG); + pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ? + true : false; + if (!pbam->support_cmd_dscr) { + pr_info("qce50 don't support command descriptor. bam_cfg%x\n", + bam_cfg); + pce_dev->no_get_around = false; + } + pce_dev->support_cmd_dscr = pbam->support_cmd_dscr; + + bam.phys_addr = pce_dev->ce_bam_info.bam_mem; + bam.virt_addr = pce_dev->ce_bam_info.bam_iobase; + + /* + * This event threshold value is only significant for BAM-to-BAM + * transfer. It's ignored for BAM-to-System mode transfer. + */ + bam.event_threshold = 0x10; /* Pipe event threshold */ + /* + * This threshold controls when the BAM publish + * the descriptor size on the sideband interface. + * SPS HW will only be used when + * data transfer size > 64 bytes. + */ + bam.summing_threshold = 64; + /* SPS driver wll handle the crypto BAM IRQ */ + bam.irq = (u32)pce_dev->ce_bam_info.bam_irq; + /* + * Set flag to indicate BAM global device control is managed + * remotely. 
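+ * This is the case when the BAM does not support command descriptors or + * when the crypto engine is shared (pce_dev->is_shared); otherwise the + * BAM is managed locally, as selected below.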
+ */ + if (!pce_dev->support_cmd_dscr || pce_dev->is_shared) + bam.manage = SPS_BAM_MGR_DEVICE_REMOTE; + else + bam.manage = SPS_BAM_MGR_LOCAL; + + bam.ee = pce_dev->ce_bam_info.bam_ee; + bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL; + bam.options |= SPS_BAM_CACHED_WP; + pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr); + pr_debug("bam virtual base=0x%pK\n", bam.virt_addr); + + /* Register CE Peripheral BAM device to SPS driver */ + rc = sps_register_bam_device(&bam, &pbam->handle); + if (rc) { + pr_err("sps_register_bam_device() failed! err=%d\n", rc); + rc = -EIO; + iounmap(pbam->bam_iobase); + kfree(pbam); + goto ret; + } + + pce_dev->pbam = pbam; + list_add_tail(&pbam->qlist, &qce50_bam_list); + pce_dev->ce_bam_info.bam_handle = pbam->handle; + +ret: + mutex_unlock(&bam_register_lock); + + return rc; +} +/** + * Initialize SPS HW connected with CE core + * + * This function register BAM HW resources with + * SPS driver and then initialize 2 SPS endpoints + * + * This function should only be called once typically + * during driver probe. + * + * @pce_dev - Pointer to qce_device structure + * + * @return - 0 if successful else negative value. + * + */ +static int qce_sps_init(struct qce_device *pce_dev) +{ + int rc = 0; + + rc = qce_sps_get_bam(pce_dev); + if (rc) + return rc; + pr_debug("BAM device registered. bam_handle=0x%lx\n", + pce_dev->ce_bam_info.bam_handle); + + rc = qce_sps_init_ep_conn(pce_dev, + &pce_dev->ce_bam_info.producer, true); + if (rc) + goto sps_connect_producer_err; + rc = qce_sps_init_ep_conn(pce_dev, + &pce_dev->ce_bam_info.consumer, false); + if (rc) + goto sps_connect_consumer_err; + + pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n", + (unsigned long long)pce_dev->ce_bam_info.bam_mem, + (unsigned int)pce_dev->ce_bam_info.bam_irq); + return rc; + +sps_connect_consumer_err: + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); +sps_connect_producer_err: + qce_sps_release_bam(pce_dev); + return rc; +} + +static inline int qce_alloc_req_info(struct qce_device *pce_dev) +{ + int i; + int request_index = pce_dev->ce_request_index; + + for (i = 0; i < MAX_QCE_BAM_REQ; i++) { + request_index++; + if (request_index >= MAX_QCE_BAM_REQ) + request_index = 0; + if (!atomic_xchg( + &pce_dev->ce_request_info[request_index].in_use, + true)) { + pce_dev->ce_request_index = request_index; + return request_index; + } + } + pr_warn("pcedev %d no reqs available no_of_queued_req %d\n", + pce_dev->dev_no, atomic_read( + &pce_dev->no_of_queued_req)); + return -EBUSY; +} + +static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, + bool is_complete) +{ + pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST; + if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use, + false)) { + if (req_info < MAX_QCE_BAM_REQ && is_complete) + atomic_dec(&pce_dev->no_of_queued_req); + } else + pr_warn("request info %d free already\n", req_info); +} + +static void print_notify_debug(struct sps_event_notify *notify) +{ + phys_addr_t addr = + DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags, + notify->data.transfer.iovec.addr); + pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n", + notify->event_id, &addr, + notify->data.transfer.iovec.size, + notify->data.transfer.iovec.flags, + notify->data.transfer.user); +} + +static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info) +{ + struct ce_request_info *preq_info; + + preq_info = &pce_dev->ce_request_info[req_info]; + + switch 
(preq_info->xfer_type) { + case QCE_XFER_CIPHERING: + _ablk_cipher_complete(pce_dev, req_info); + break; + case QCE_XFER_HASHING: + _sha_complete(pce_dev, req_info); + break; + case QCE_XFER_AEAD: + _aead_complete(pce_dev, req_info); + break; + case QCE_XFER_F8: + _f8_complete(pce_dev, req_info); + break; + case QCE_XFER_F9: + _f9_complete(pce_dev, req_info); + break; + default: + qce_free_req_info(pce_dev, req_info, true); + break; + } +} + +static void qce_multireq_timeout(struct timer_list *data) +{ + struct qce_device *pce_dev = from_timer(pce_dev, data, timer); + int ret = 0; + int last_seq; + unsigned long flags; + + last_seq = atomic_read(&pce_dev->bunch_cmd_seq); + if (last_seq == 0 || + last_seq != atomic_read(&pce_dev->last_intr_seq)) { + atomic_set(&pce_dev->last_intr_seq, last_seq); + mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES)); + return; + } + /* last bunch mode command timed out */ + + /* + * From here until the dummy request finishes its sps transfer and the + * owner is set back to NONE, we disable interrupts, so this section + * cannot be preempted or interrupted. If a BAM interrupt fired in + * between and the completion callback were invoked from BAM, a new + * request could be issued by the client driver and a deadlock could + * occur. + */ + local_irq_save(flags); + if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT) + != QCE_OWNER_NONE) { + local_irq_restore(flags); + mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES)); + return; + } + + ret = qce_dummy_req(pce_dev); + if (ret) + pr_warn("pcedev %d: Failed to insert dummy req\n", + pce_dev->dev_no); + cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE); + pce_dev->mode = IN_INTERRUPT_MODE; + local_irq_restore(flags); + + del_timer(&(pce_dev->timer)); + pce_dev->qce_stats.no_of_timeouts++; + pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no); +} + +void qce_get_driver_stats(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (!_qce50_disp_stats) + return; + pr_info("Engine %d timeout occurred %d\n", pce_dev->dev_no, + pce_dev->qce_stats.no_of_timeouts); + pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no, + pce_dev->qce_stats.no_of_dummy_reqs); + if (pce_dev->mode) + pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no); + else + pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no); + pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no, + atomic_read(&pce_dev->no_of_queued_req)); +} +EXPORT_SYMBOL(qce_get_driver_stats); + +void qce_clear_driver_stats(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + pce_dev->qce_stats.no_of_timeouts = 0; + pce_dev->qce_stats.no_of_dummy_reqs = 0; +} +EXPORT_SYMBOL(qce_clear_driver_stats); + +static void _sps_producer_callback(struct sps_event_notify *notify) +{ + struct qce_device *pce_dev = (struct qce_device *) + ((struct sps_event_notify *)notify)->user; + int rc = 0; + unsigned int req_info; + struct ce_sps_data *pce_sps_data; + struct ce_request_info *preq_info; + + print_notify_debug(notify); + + req_info = (unsigned int)((uintptr_t)notify->data.transfer.user); + if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) { + pr_warn("request information %d out of range\n", req_info); + return; + } + + req_info = req_info & 0x00ff; + if (req_info < 0 || req_info >= MAX_QCE_ALLOC_BAM_REQ) { + pr_warn("request information %d out of range\n", req_info); + return; + } + + preq_info = &pce_dev->ce_request_info[req_info]; + + pce_sps_data = &preq_info->ce_sps; + if ((preq_info->xfer_type == 
QCE_XFER_CIPHERING || + preq_info->xfer_type == QCE_XFER_AEAD) && + pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) { + pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_sps_data->out_transfer.iovec_count = 0; + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer); + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, + &pce_sps_data->out_transfer); + if (rc) { + pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", + (uintptr_t)pce_dev->ce_bam_info.producer.pipe, + rc); + } + return; + } + + _qce_req_complete(pce_dev, req_info); +} + +/** + * De-initialize SPS HW connected with CE core + * + * This function deinitialize SPS endpoints and then + * deregisters BAM resources from SPS driver. + * + * This function should only be called once typically + * during driver remove. + * + * @pce_dev - Pointer to qce_device structure + * + */ +static void qce_sps_exit(struct qce_device *pce_dev) +{ + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer); + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); + qce_sps_release_bam(pce_dev); +} + +static void qce_add_cmd_element(struct qce_device *pdev, + struct sps_command_element **cmd_ptr, u32 addr, + u32 data, struct sps_command_element **populate) +{ + (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase); + (*cmd_ptr)->command = 0; + (*cmd_ptr)->data = data; + (*cmd_ptr)->mask = 0xFFFFFFFF; + (*cmd_ptr)->reserved = 0; + if (populate != NULL) + *populate = *cmd_ptr; + (*cmd_ptr)++; +} + +static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, enum qce_cipher_mode_enum mode, + bool key_128) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t encr_cfg = 0; + uint32_t key_reg = 0; + uint32_t xts_key_reg = 0; + uint32_t iv_reg = 0; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + ce_vaddr_start = (uintptr_t)(*pvaddr); + /* + * Designate chunks of the allocated memory to various + * command list pointers related to AES cipher operations defined + * in ce_cmdlistptrs_ops structure. 
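+ * Each supported combination of mode (CBC/CTR, ECB, XTS) and key size + * (128 or 256 bit) gets its own pre-built command list; iv_reg, key_reg + * and xts_key_reg below select how many IV, key and XTS-key words each + * list programs.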
+ */ + switch (mode) { + case QCE_MODE_CBC: + case QCE_MODE_CTR: + if (key_128) { + cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr); + if (mode == QCE_MODE_CBC) + encr_cfg = pdev->reg.encr_cfg_aes_cbc_128; + else + encr_cfg = pdev->reg.encr_cfg_aes_ctr_128; + iv_reg = 4; + key_reg = 4; + xts_key_reg = 0; + } else { + cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr); + + if (mode == QCE_MODE_CBC) + encr_cfg = pdev->reg.encr_cfg_aes_cbc_256; + else + encr_cfg = pdev->reg.encr_cfg_aes_ctr_256; + iv_reg = 4; + key_reg = 8; + xts_key_reg = 0; + } + break; + case QCE_MODE_ECB: + if (key_128) { + cmdlistptr->cipher_aes_128_ecb.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_128_ecb); + + encr_cfg = pdev->reg.encr_cfg_aes_ecb_128; + iv_reg = 0; + key_reg = 4; + xts_key_reg = 0; + } else { + cmdlistptr->cipher_aes_256_ecb.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_256_ecb); + + encr_cfg = pdev->reg.encr_cfg_aes_ecb_256; + iv_reg = 0; + key_reg = 8; + xts_key_reg = 0; + } + break; + case QCE_MODE_XTS: + if (key_128) { + cmdlistptr->cipher_aes_128_xts.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_128_xts); + + encr_cfg = pdev->reg.encr_cfg_aes_xts_128; + iv_reg = 4; + key_reg = 4; + xts_key_reg = 4; + } else { + cmdlistptr->cipher_aes_256_xts.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_aes_256_xts); + + encr_cfg = pdev->reg.encr_cfg_aes_xts_256; + iv_reg = 4; + key_reg = 8; + xts_key_reg = 8; + } + break; + default: + pr_err("Unknown mode of operation %d received, exiting now\n", + mode); + return -EINVAL; + break; + } + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg, + &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + &pcl_info->encr_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + &pcl_info->encr_seg_start); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG, + (uint32_t)0xffffffff, &pcl_info->encr_mask); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, + &pcl_info->auth_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, + &pcl_info->encr_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + if (xts_key_reg) { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG, + 0, &pcl_info->encr_xts_key); + for (i = 1; i < xts_key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_XTS_KEY0_REG + + i * sizeof(uint32_t)), 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, + CRYPTO_ENCR_XTS_DU_SIZE_REG, 0, + &pcl_info->encr_xts_du_size); + } + if (iv_reg) { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0, + &pcl_info->encr_cntr_iv); + for 
(i = 1; i < iv_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)), + 0, NULL); + } + /* Add dummy to align size to burst-size multiple */ + if (mode == QCE_MODE_XTS) { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, + 0, &pcl_info->auth_seg_size); + } else { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, + 0, &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, + 0, &pcl_info->auth_seg_size); + } + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, enum qce_cipher_alg_enum alg, + bool mode_cbc) +{ + + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t encr_cfg = 0; + uint32_t key_reg = 0; + uint32_t iv_reg = 0; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + ce_vaddr_start = (uintptr_t)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to cipher operations defined + * in ce_cmdlistptrs_ops structure. + */ + switch (alg) { + case CIPHER_ALG_DES: + if (mode_cbc) { + cmdlistptr->cipher_des_cbc.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_des_cbc); + + + encr_cfg = pdev->reg.encr_cfg_des_cbc; + iv_reg = 2; + key_reg = 2; + } else { + cmdlistptr->cipher_des_ecb.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_des_ecb); + + encr_cfg = pdev->reg.encr_cfg_des_ecb; + iv_reg = 0; + key_reg = 2; + } + break; + case CIPHER_ALG_3DES: + if (mode_cbc) { + cmdlistptr->cipher_3des_cbc.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_3des_cbc); + + encr_cfg = pdev->reg.encr_cfg_3des_cbc; + iv_reg = 2; + key_reg = 6; + } else { + cmdlistptr->cipher_3des_ecb.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_3des_ecb); + + encr_cfg = pdev->reg.encr_cfg_3des_ecb; + iv_reg = 0; + key_reg = 6; + } + break; + default: + pr_err("Unknown algorithms %d received, exiting now\n", alg); + return -EINVAL; + break; + } + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg, + &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + &pcl_info->encr_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + &pcl_info->encr_seg_start); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, + &pcl_info->auth_seg_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, + &pcl_info->encr_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, 
&ce_vaddr, + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + if (iv_reg) { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0, + &pcl_info->encr_cntr_iv); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0, + NULL); + } + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev, + int cri_index, unsigned char **pvaddr) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info + [cri_index].ce_sps.cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr_start = (uintptr_t)(*pvaddr); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + + cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->cipher_null); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, + pdev->ce_bam_info.ce_burst_size, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, + pdev->reg.encr_cfg_aes_ecb_128, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, + NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + return 0; +} + +static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, enum qce_hash_alg_enum alg, + bool key_128) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t key_reg = 0; + uint32_t auth_cfg = 0; + uint32_t iv_reg = 0; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr_start = (uintptr_t)(*pvaddr); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to authentication operations + * defined in ce_cmdlistptrs_ops structure. 
+ */ + switch (alg) { + case QCE_HASH_SHA1: + cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_sha1); + + auth_cfg = pdev->reg.auth_cfg_sha1; + iv_reg = 5; + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, + 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + break; + case QCE_HASH_SHA256: + cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_sha256); + + auth_cfg = pdev->reg.auth_cfg_sha256; + iv_reg = 8; + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, + 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + /* 1 dummy write */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, + 0, NULL); + break; + case QCE_HASH_SHA1_HMAC: + cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_sha1_hmac); + + auth_cfg = pdev->reg.auth_cfg_hmac_sha1; + key_reg = 16; + iv_reg = 5; + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, + 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + break; + case QCE_HASH_SHA256_HMAC: + cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_sha256_hmac); + + auth_cfg = pdev->reg.auth_cfg_hmac_sha256; + key_reg = 16; + iv_reg = 8; + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, + NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + /* 1 dummy write */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, + 0, NULL); + break; + case QCE_HASH_AES_CMAC: + if (key_128) { + cmdlistptr->auth_aes_128_cmac.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_aes_128_cmac); + + auth_cfg = pdev->reg.auth_cfg_cmac_128; + key_reg = 4; + } else { + cmdlistptr->auth_aes_256_cmac.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->auth_aes_256_cmac); + + auth_cfg = pdev->reg.auth_cfg_cmac_256; + key_reg = 8; + } + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, + NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + /* 1 dummy write */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, + 0, NULL); + break; + default: + pr_err("Unknown algorithms %d received, exiting now\n", alg); + return -EINVAL; + break; + } + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, + &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, + auth_cfg, &pcl_info->auth_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0, + &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, + &pcl_info->auth_seg_start); + + if (alg == QCE_HASH_AES_CMAC) { + /* reset auth iv, bytecount and key registers */ + for (i = 0; i < 16; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)), + 0, NULL); + for (i = 0; i < 16; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), + 0, NULL); + qce_add_cmd_element(pdev, 
&ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG, + 0, NULL); + } else { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0, + &pcl_info->auth_iv); + for (i = 1; i < iv_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)), + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG, + 0, &pcl_info->auth_bytecount); + } + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL); + + if (key_reg) { + qce_add_cmd_element(pdev, &ce_vaddr, + CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), + 0, NULL); + } + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_aead_cmdlistptrs(struct qce_device *pdev, + int cri_index, + unsigned char **pvaddr, + uint32_t alg, + uint32_t mode, + uint32_t key_size, + bool sha1) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmd; + struct qce_cmdlist_info *pcl_info = NULL; + uint32_t key_reg; + uint32_t iv_reg; + uint32_t i; + uint32_t enciv_in_word; + uint32_t encr_cfg; + + cmd = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + + ce_vaddr_start = (uintptr_t)(*pvaddr); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + + switch (alg) { + + case CIPHER_ALG_DES: + + switch (mode) { + + case QCE_MODE_CBC: + if (sha1) { + cmd->aead_hmac_sha1_cbc_des.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha1_cbc_des); + } else { + cmd->aead_hmac_sha256_cbc_des.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha256_cbc_des); + } + encr_cfg = pdev->reg.encr_cfg_des_cbc; + break; + default: + return -EINVAL; + } + + enciv_in_word = 2; + + break; + + case CIPHER_ALG_3DES: + switch (mode) { + + case QCE_MODE_CBC: + if (sha1) { + cmd->aead_hmac_sha1_cbc_3des.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha1_cbc_3des); + } else { + cmd->aead_hmac_sha256_cbc_3des.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha256_cbc_3des); + } + encr_cfg = pdev->reg.encr_cfg_3des_cbc; + break; + default: + return -EINVAL; + } + + enciv_in_word = 2; + + break; + + case CIPHER_ALG_AES: + switch (mode) { + + case QCE_MODE_CBC: + if (key_size == AES128_KEY_SIZE) { + if (sha1) { + cmd->aead_hmac_sha1_cbc_aes_128.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha1_cbc_aes_128); + } else { + cmd->aead_hmac_sha256_cbc_aes_128.cmdlist + = (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha256_cbc_aes_128); + } + encr_cfg = pdev->reg.encr_cfg_aes_cbc_128; + } else if (key_size == AES256_KEY_SIZE) { + if (sha1) { + cmd->aead_hmac_sha1_cbc_aes_256.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha1_cbc_aes_256); + } else { + cmd->aead_hmac_sha256_cbc_aes_256.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = + &(cmd->aead_hmac_sha256_cbc_aes_256); + } + encr_cfg = pdev->reg.encr_cfg_aes_cbc_256; + } else { + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + enciv_in_word = 4; + + break; + + default: + return 
-EINVAL; + } + + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + + key_reg = key_size/sizeof(uint32_t); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, + &pcl_info->encr_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + + if (mode != QCE_MODE_ECB) { + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0, + &pcl_info->encr_cntr_iv); + for (i = 1; i < enciv_in_word; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)), + 0, NULL); + } + + if (sha1) + iv_reg = 5; + else + iv_reg = 8; + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0, + &pcl_info->auth_iv); + for (i = 1; i < iv_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)), + 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG, + 0, &pcl_info->auth_bytecount); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL); + + key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0, + &pcl_info->auth_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg, + &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + &pcl_info->encr_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + &pcl_info->encr_seg_start); + + if (sha1) + qce_add_cmd_element( + pdev, + &ce_vaddr, + CRYPTO_AUTH_SEG_CFG_REG, + pdev->reg.auth_cfg_aead_sha1_hmac, + &pcl_info->auth_seg_cfg); + else + qce_add_cmd_element( + pdev, + &ce_vaddr, + CRYPTO_AUTH_SEG_CFG_REG, + pdev->reg.auth_cfg_aead_sha256_hmac, + &pcl_info->auth_seg_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0, + &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, + &pcl_info->auth_seg_start); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + return 0; +} + +static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, bool key_128) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info + [cri_index].ce_sps.cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t encr_cfg = 0; + uint32_t auth_cfg = 0; + uint32_t key_reg = 0; + + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr_start = (uintptr_t)(*pvaddr); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to aead operations + * defined in ce_cmdlistptrs_ops structure. 
+ */ + if (key_128) { + cmdlistptr->aead_aes_128_ccm.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->aead_aes_128_ccm); + + auth_cfg = pdev->reg.auth_cfg_aes_ccm_128; + encr_cfg = pdev->reg.encr_cfg_aes_ccm_128; + key_reg = 4; + } else { + + cmdlistptr->aead_aes_256_ccm.cmdlist = + (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->aead_aes_256_ccm); + + auth_cfg = pdev->reg.auth_cfg_aes_ccm_256; + encr_cfg = pdev->reg.encr_cfg_aes_ccm_256; + + key_reg = 8; + } + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, + encr_cfg, &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + &pcl_info->encr_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + &pcl_info->encr_seg_start); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG, + (uint32_t)0xffffffff, &pcl_info->encr_mask); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2, + (uint32_t)0xffffffff, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, + auth_cfg, &pcl_info->auth_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0, + &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, + &pcl_info->auth_seg_start); + /* reset auth iv, bytecount and key registers */ + for (i = 0; i < 8; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)), + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG, + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, + 0, NULL); + for (i = 0; i < 16; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + /* set auth key */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0, + &pcl_info->auth_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + /* set NONCE info */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0, + &pcl_info->auth_nonce_info); + for (i = 1; i < 4; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_INFO_NONCE0_REG + + i * sizeof(uint32_t)), 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, + &pcl_info->encr_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0, + &pcl_info->encr_cntr_iv); + for (i = 1; i < 4; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)), + 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0, + &pcl_info->encr_ccm_cntr_iv); + for (i = 1; i < 4; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)), + 0, NULL); + + 
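+ /*
+ * The remaining elements restore CRYPTO_CONFIG to little endian and
+ * issue the GO command with RESULTS_DUMP and CLR_CNTXT set, matching
+ * the tail sequence of the other command list setup routines above.
+ */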
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, enum qce_ota_algo_enum alg) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t encr_cfg = 0; + uint32_t key_reg = 4; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + ce_vaddr_start = (uintptr_t)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to f8 cipher algorithm defined + * in ce_cmdlistptrs_ops structure. + */ + + switch (alg) { + case QCE_OTA_ALGO_KASUMI: + cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->f8_kasumi); + encr_cfg = pdev->reg.encr_cfg_kasumi; + break; + + case QCE_OTA_ALGO_SNOW3G: + default: + cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->f8_snow3g); + encr_cfg = pdev->reg.encr_cfg_snow3g; + break; + } + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, + 0, NULL); + /* set config to big endian */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg, + &pcl_info->encr_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, + &pcl_info->encr_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, + &pcl_info->encr_seg_start); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, + &pcl_info->auth_seg_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, + 0, &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, + 0, &pcl_info->auth_seg_start); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, + &pcl_info->encr_key); + for (i = 1; i < key_reg; i++) + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)), + 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0, + &pcl_info->encr_cntr_iv); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0, + NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr, enum qce_ota_algo_enum alg) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start; + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + int i = 0; + uint32_t auth_cfg = 0; + 
uint32_t iv_reg = 0; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr_start = (uintptr_t)(*pvaddr); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to authentication operations + * defined in ce_cmdlistptrs_ops structure. + */ + switch (alg) { + case QCE_OTA_ALGO_KASUMI: + cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->f9_kasumi); + auth_cfg = pdev->reg.auth_cfg_kasumi; + break; + + case QCE_OTA_ALGO_SNOW3G: + default: + cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->f9_snow3g); + auth_cfg = pdev->reg.auth_cfg_snow3g; + } + + /* clear status register */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, + 0, NULL); + /* set config to big endian */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); + + iv_reg = 5; + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, + &pcl_info->seg_size); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, + &pcl_info->encr_seg_cfg); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, + auth_cfg, &pcl_info->auth_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0, + &pcl_info->auth_seg_size); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, + &pcl_info->auth_seg_start); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0, + &pcl_info->auth_iv); + for (i = 1; i < iv_reg; i++) { + qce_add_cmd_element(pdev, &ce_vaddr, + (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)), + 0, NULL); + } + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG, + 0, &pcl_info->auth_bytecount); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + pdev->reg.crypto_cfg_le, NULL); + + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, + ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | + (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); + + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev, + int cri_index, unsigned char **pvaddr) +{ + struct sps_command_element *ce_vaddr; + uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr); + struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlist_info *pcl_info = NULL; + + cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; + *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), + pdev->ce_bam_info.ce_burst_size); + ce_vaddr = (struct sps_command_element *)(*pvaddr); + cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr; + pcl_info = &(cmdlistptr->unlock_all_pipes); + + /* + * Designate chunks of the allocated memory to command list + * to unlock pipes. 
+ */ + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, + CRYPTO_CONFIG_RESET, NULL); + pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; + *pvaddr = (unsigned char *) ce_vaddr; + + return 0; +} + +static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index, + unsigned char **pvaddr) +{ + struct sps_command_element *ce_vaddr = + (struct sps_command_element *)(*pvaddr); + /* + * Designate chunks of the allocated memory to various + * command list pointers related to operations defined + * in ce_cmdlistptrs_ops structure. + */ + ce_vaddr = + (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr), + pdev->ce_bam_info.ce_burst_size); + *pvaddr = (unsigned char *) ce_vaddr; + + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC, + true); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR, + true); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB, + true); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS, + true); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC, + false); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR, + false); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB, + false); + _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS, + false); + + _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, + true); + _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, + false); + _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, + true); + _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, + false); + + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1, + false); + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256, + false); + + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC, + false); + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC, + false); + + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC, + true); + _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC, + false); + + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, + QCE_MODE_CBC, DES_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, + QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, + QCE_MODE_CBC, AES128_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, + QCE_MODE_CBC, AES256_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, + QCE_MODE_CBC, DES_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, + QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, + QCE_MODE_CBC, AES128_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, + QCE_MODE_CBC, AES256_KEY_SIZE, false); + + _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr); + + _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true); + _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false); + _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI); + _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G); + _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI); + _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G); + _setup_unlock_pipe_cmdlistptrs(pdev, 
cri_index, pvaddr); + + return 0; +} + +static int qce_setup_ce_sps_data(struct qce_device *pce_dev) +{ + unsigned char *vaddr; + int i; + unsigned char *iovec_vaddr; + int iovec_memsize; + + vaddr = pce_dev->coh_vmem; + vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr), + pce_dev->ce_bam_info.ce_burst_size); + iovec_vaddr = pce_dev->iovec_vmem; + iovec_memsize = pce_dev->iovec_memsize; + for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) { + /* Allow for 256 descriptor (cmd and data) entries per pipe */ + pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec = + (struct sps_iovec *)iovec_vaddr; + pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys = + virt_to_phys( + pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec); + iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE; + iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE; + pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec = + (struct sps_iovec *)iovec_vaddr; + pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys = + virt_to_phys( + pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec); + iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE; + iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE; + if (pce_dev->support_cmd_dscr) + qce_setup_cmdlistptrs(pce_dev, i, &vaddr); + vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr), + pce_dev->ce_bam_info.ce_burst_size); + pce_dev->ce_request_info[i].ce_sps.result_dump = + (uintptr_t)vaddr; + pce_dev->ce_request_info[i].ce_sps.result_dump_phy = + GET_PHYS_ADDR((uintptr_t)vaddr); + pce_dev->ce_request_info[i].ce_sps.result = + (struct ce_result_dump_format *)vaddr; + vaddr += CRYPTO_RESULT_DUMP_SIZE; + + pce_dev->ce_request_info[i].ce_sps.result_dump_null = + (uintptr_t)vaddr; + pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy = + GET_PHYS_ADDR((uintptr_t)vaddr); + pce_dev->ce_request_info[i].ce_sps.result_null = + (struct ce_result_dump_format *)vaddr; + vaddr += CRYPTO_RESULT_DUMP_SIZE; + + pce_dev->ce_request_info[i].ce_sps.ignore_buffer = + (uintptr_t)vaddr; + vaddr += pce_dev->ce_bam_info.ce_burst_size * 2; + } + if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize || + iovec_memsize < 0) + panic("qce50: Not enough coherent memory. 
Allocate %x , need %lx\n", + pce_dev->memsize, (uintptr_t)vaddr - + (uintptr_t)pce_dev->coh_vmem); + return 0; +} + +static int qce_init_ce_cfg_val(struct qce_device *pce_dev) +{ + uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1; + uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index; + + pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) | + BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) | + BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) | + (pipe_pair << CRYPTO_PIPE_SET_SELECT); + + pce_dev->reg.crypto_cfg_le = + (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK); + + /* Initialize encr_cfg register for AES alg */ + pce_dev->reg.encr_cfg_aes_cbc_128 = + (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_cbc_256 = + (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_ctr_128 = + (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_ctr_256 = + (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_xts_128 = + (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_xts_256 = + (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_ecb_128 = + (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_ecb_256 = + (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_aes_ccm_128 = + (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)| + (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); + + pce_dev->reg.encr_cfg_aes_ccm_256 = + (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) | + (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM); + + /* Initialize encr_cfg register for DES alg */ + pce_dev->reg.encr_cfg_des_ecb = + (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_des_cbc = + (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_3des_ecb = + (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + + pce_dev->reg.encr_cfg_3des_cbc = + (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) | + (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + + /* Initialize encr_cfg register for kasumi/snow3g alg */ + pce_dev->reg.encr_cfg_kasumi = + (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG); + + pce_dev->reg.encr_cfg_snow3g = 
+ (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG); + + /* Initialize auth_cfg register for CMAC alg */ + pce_dev->reg.auth_cfg_cmac_128 = + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | + (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE); + + pce_dev->reg.auth_cfg_cmac_256 = + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | + (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE); + + /* Initialize auth_cfg register for HMAC alg */ + pce_dev->reg.auth_cfg_hmac_sha1 = + (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + pce_dev->reg.auth_cfg_hmac_sha256 = + (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + /* Initialize auth_cfg register for SHA1/256 alg */ + pce_dev->reg.auth_cfg_sha1 = + (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + pce_dev->reg.auth_cfg_sha256 = + (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + /* Initialize auth_cfg register for AEAD alg */ + pce_dev->reg.auth_cfg_aead_sha1_hmac = + (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST); + + pce_dev->reg.auth_cfg_aead_sha256_hmac = + (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) | + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) | + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST); + + pce_dev->reg.auth_cfg_aes_ccm_128 = + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | + (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) | + ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS); + pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); + + pce_dev->reg.auth_cfg_aes_ccm_256 = + (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) | + (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)| + (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) | + (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) | + ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS); + pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); + + /* Initialize auth_cfg register for kasumi/snow3g */ + pce_dev->reg.auth_cfg_kasumi = + (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) | + BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST); + pce_dev->reg.auth_cfg_snow3g = + (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) | + BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST); + return 0; +} + +static void _qce_ccm_get_around_input(struct qce_device *pce_dev, + struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir) +{ + struct qce_cmdlist_info *cmdlistinfo; + struct ce_sps_data *pce_sps_data; + + pce_sps_data = &preq_info->ce_sps; + if ((dir == QCE_DECRYPT) && pce_dev->no_get_around && + 
!(pce_dev->no_ccm_mac_status_get_around)) { + cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null; + _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo, + &pce_sps_data->in_transfer); + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + pce_dev->ce_bam_info.ce_burst_size, + &pce_sps_data->in_transfer); + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD); + } +} + +static void _qce_ccm_get_around_output(struct qce_device *pce_dev, + struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir) +{ + struct ce_sps_data *pce_sps_data; + + pce_sps_data = &preq_info->ce_sps; + + if ((dir == QCE_DECRYPT) && pce_dev->no_get_around && + !(pce_dev->no_ccm_mac_status_get_around)) { + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + pce_dev->ce_bam_info.ce_burst_size, + &pce_sps_data->out_transfer); + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null), + CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); + } +} + +/* QCE_DUMMY_REQ */ +static void qce_dummy_complete(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret) +{ + if (!cookie) + pr_err("invalid cookie\n"); +} + +static int qce_dummy_req(struct qce_device *pce_dev) +{ + int ret = 0; + + if (atomic_xchg( + &pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true)) + return -EBUSY; + ret = qce_process_sha_req(pce_dev, NULL); + pce_dev->qce_stats.no_of_dummy_reqs++; + return ret; +} + +static int select_mode(struct qce_device *pce_dev, + struct ce_request_info *preq_info) +{ + struct ce_sps_data *pce_sps_data = &preq_info->ce_sps; + unsigned int no_of_queued_req; + unsigned int cadence; + + if (!pce_dev->no_get_around) { + _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); + return 0; + } + + /* + * claim ownership of device + */ +again: + if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT) + != QCE_OWNER_NONE) { + ndelay(40); + goto again; + } + no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req); + if (pce_dev->mode == IN_INTERRUPT_MODE) { + if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) { + pce_dev->mode = IN_BUNCH_MODE; + pr_debug("pcedev %d mode switch to BUNCH\n", + pce_dev->dev_no); + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + pce_dev->intr_cadence = 0; + atomic_set(&pce_dev->bunch_cmd_seq, 1); + atomic_set(&pce_dev->last_intr_seq, 1); + mod_timer(&(pce_dev->timer), + (jiffies + DELAY_IN_JIFFIES)); + } else { + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + } + } else { + pce_dev->intr_cadence++; + cadence = (preq_info->req_len >> 7) + 1; + if (cadence > SET_INTR_AT_REQ) + cadence = SET_INTR_AT_REQ; + if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence + == cadence) && pce_dev->cadence_flag)) + atomic_inc(&pce_dev->bunch_cmd_seq); + else { + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + pce_dev->intr_cadence = 0; + atomic_set(&pce_dev->bunch_cmd_seq, 0); + atomic_set(&pce_dev->last_intr_seq, 0); + pce_dev->cadence_flag = !pce_dev->cadence_flag; + } + } + + return 0; +} + +static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) +{ + int rc = 0; + struct qce_device *pce_dev = (struct qce_device *) handle; + struct aead_request *areq = (struct aead_request *) q_req->areq; + uint32_t authsize = q_req->authsize; + uint32_t totallen_in, out_len; + uint32_t hw_pad_out = 0; + int ce_burst_size; + struct qce_cmdlist_info *cmdlistinfo = NULL; + int req_info = -1; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + + 
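+ /*
+ * Reserve a free request slot before any DMA mapping or command list
+ * setup; when every ce_request_info entry is in use the request is
+ * bounced back to the caller with -EBUSY.
+ */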
req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + + ce_burst_size = pce_dev->ce_bam_info.ce_burst_size; + totallen_in = areq->cryptlen + q_req->assoclen; + if (q_req->dir == QCE_ENCRYPT) { + q_req->cryptlen = areq->cryptlen; + out_len = areq->cryptlen + authsize; + hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize; + } else { + q_req->cryptlen = areq->cryptlen - authsize; + out_len = q_req->cryptlen; + hw_pad_out = authsize; + } + + /* + * For crypto 5.0 that has burst size alignment requirement + * for data descriptors, + * the agent above (qcrypto) prepares the src scatter list with + * memory starting with associated data, followed by + * data stream to be ciphered. + * The destination scatter list is pointing to the same + * data area as source. + */ + if (pce_dev->ce_bam_info.minor_version == 0) + preq_info->src_nents = count_sg(areq->src, totallen_in); + else + preq_info->src_nents = count_sg(areq->src, areq->cryptlen + + areq->assoclen); + + if (q_req->assoclen) { + preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen); + + /* formatted associated data input */ + qce_dma_map_sg(pce_dev->pdev, q_req->asg, + preq_info->assoc_nents, DMA_TO_DEVICE); + preq_info->asg = q_req->asg; + } else { + preq_info->assoc_nents = 0; + preq_info->asg = NULL; + } + /* cipher input */ + qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + /* cipher + mac output for encryption */ + if (areq->src != areq->dst) { + /* + * The destination scatter list is pointing to the same + * data area as src. + * Note, the associated data will be pass-through + * at the beginning of destination area. 
+ */ + preq_info->dst_nents = count_sg(areq->dst, + out_len + areq->assoclen); + qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + DMA_FROM_DEVICE); + } else { + preq_info->dst_nents = preq_info->src_nents; + } + + if (pce_dev->support_cmd_dscr) { + cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info, + q_req); + if (cmdlistinfo == NULL) { + pr_err("Unsupported cipher algorithm %d, mode %d\n", + q_req->alg, q_req->mode); + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + /* set up crypto device */ + rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, + q_req->assoclen, cmdlistinfo); + } else { + /* set up crypto device */ + rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in, + q_req->assoclen); + } + + if (rc < 0) + goto bad; + + preq_info->mode = q_req->mode; + + /* setup for callback, and issue command to bam */ + preq_info->areq = q_req->areq; + preq_info->qce_cb = q_req->qce_cb; + preq_info->dir = q_req->dir; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_AEAD; + preq_info->req_len = totallen_in; + + _qce_sps_iovec_count_init(pce_dev, req_info); + + if (pce_dev->support_cmd_dscr && cmdlistinfo) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + + if (pce_dev->ce_bam_info.minor_version == 0) { + goto bad; + } else { + if (q_req->assoclen && (_qce_sps_add_sg_data( + pce_dev, q_req->asg, q_req->assoclen, + &pce_sps_data->in_transfer))) + goto bad; + if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen, + areq->assoclen, + &pce_sps_data->in_transfer)) + goto bad; + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir); + + if (pce_dev->no_get_around) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + /* Pass through to ignore associated data*/ + if (_qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + q_req->assoclen, + &pce_sps_data->out_transfer)) + goto bad; + if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len, + areq->assoclen, + &pce_sps_data->out_transfer)) + goto bad; + /* Pass through to ignore hw_pad (padding of the MAC data) */ + if (_qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + hw_pad_out, &pce_sps_data->out_transfer)) + goto bad; + if (pce_dev->no_get_around || + totallen_in <= SPS_MAX_PKT_SIZE) { + if (_qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer)) + goto bad; + pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + } else { + pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + } + + _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir); + + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + } + if (rc) + goto bad; + return 0; + +bad: + if (preq_info->assoc_nents) { + qce_dma_unmap_sg(pce_dev->pdev, q_req->asg, + preq_info->assoc_nents, DMA_TO_DEVICE); + } + if (preq_info->src_nents) { + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + } + if (areq->src != areq->dst) { + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + DMA_FROM_DEVICE); + } + qce_free_req_info(pce_dev, req_info, false); + return rc; +} + +static int _qce_suspend(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *)handle; + struct sps_pipe *sps_pipe_info; + + if (handle == NULL) + return -ENODEV; + + sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; + sps_disconnect(sps_pipe_info); + + sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; + sps_disconnect(sps_pipe_info); + + return 0; +} + +static int _qce_resume(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *)handle; + struct sps_pipe *sps_pipe_info; + struct sps_connect *sps_connect_info; + int rc; + + if (handle == NULL) + return -ENODEV; + + sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; + sps_connect_info = &pce_dev->ce_bam_info.consumer.connect; + memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + return rc; + } + sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; + sps_connect_info = &pce_dev->ce_bam_info.producer.connect; + memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) + pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + + rc = sps_register_event(sps_pipe_info, + &pce_dev->ce_bam_info.producer.event); + if (rc) + pr_err("Producer callback registration failed rc = %d\n", rc); + + return rc; +} + +struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume}; +EXPORT_SYMBOL(qce_pm_table); + +int qce_aead_req(void *handle, struct qce_req *q_req) +{ + struct qce_device *pce_dev = (struct qce_device *)handle; + struct aead_request *areq; + uint32_t authsize; + struct crypto_aead *aead; + uint32_t ivsize; + uint32_t totallen; + int rc = 0; + struct qce_cmdlist_info *cmdlistinfo = NULL; + int req_info = -1; + struct ce_sps_data *pce_sps_data; + struct ce_request_info *preq_info; + + if (q_req->mode == QCE_MODE_CCM) + return _qce_aead_ccm_req(handle, q_req); + + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + areq = (struct aead_request *) q_req->areq; + aead = crypto_aead_reqtfm(areq); + ivsize = crypto_aead_ivsize(aead); + q_req->ivsize = ivsize; + authsize = q_req->authsize; + if (q_req->dir == QCE_ENCRYPT) + q_req->cryptlen = areq->cryptlen; + else + q_req->cryptlen = areq->cryptlen - authsize; + + if (q_req->cryptlen > UINT_MAX - areq->assoclen) { + pr_err("Integer overflow on total aead req length.\n"); + return -EINVAL; + } + + totallen = q_req->cryptlen + areq->assoclen; + + if (pce_dev->support_cmd_dscr) { + cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev, + req_info, q_req); + if (cmdlistinfo == NULL) { + pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n", + q_req->alg, q_req->mode, q_req->encklen, + q_req->authsize); + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + /* set up crypto device */ + rc = _ce_setup_aead(pce_dev, q_req, totallen, + areq->assoclen, cmdlistinfo); + if (rc < 0) { + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + } + + /* + * For crypto 5.0 that has 
burst size alignment requirement + * for data descritpor, + * the agent above(qcrypto) prepares the src scatter list with + * memory starting with associated data, followed by + * iv, and data stream to be ciphered. + */ + preq_info->src_nents = count_sg(areq->src, totallen); + + + /* cipher input */ + qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + /* cipher output for encryption */ + if (areq->src != areq->dst) { + preq_info->dst_nents = count_sg(areq->dst, totallen); + + qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + DMA_FROM_DEVICE); + } + + + /* setup for callback, and issue command to bam */ + preq_info->areq = q_req->areq; + preq_info->qce_cb = q_req->qce_cb; + preq_info->dir = q_req->dir; + preq_info->asg = NULL; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_AEAD; + preq_info->req_len = totallen; + + _qce_sps_iovec_count_init(pce_dev, req_info); + + if (pce_dev->support_cmd_dscr) { + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + } else { + rc = _ce_setup_aead_direct(pce_dev, q_req, totallen, + areq->assoclen); + if (rc) + goto bad; + } + + preq_info->mode = q_req->mode; + + if (pce_dev->ce_bam_info.minor_version == 0) { + if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, + &pce_sps_data->in_transfer)) + goto bad; + + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, + &pce_sps_data->out_transfer)) + goto bad; + if (totallen > SPS_MAX_PKT_SIZE) { + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + } else { + if (_qce_sps_add_data(GET_PHYS_ADDR( + pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer)) + goto bad; + _qce_set_flag(&pce_sps_data->out_transfer, + SPS_IOVEC_FLAG_INT); + pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + } + rc = _qce_sps_transfer(pce_dev, req_info); + } else { + if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, + &pce_sps_data->in_transfer)) + goto bad; + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + if (pce_dev->no_get_around) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, + &pce_sps_data->out_transfer)) + goto bad; + + if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) { + if (_qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer)) + goto bad; + pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + } else { + pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + } + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + } + if (rc) + goto bad; + return 0; + +bad: + if (preq_info->src_nents) + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + if (areq->src != areq->dst) + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + DMA_FROM_DEVICE); + qce_free_req_info(pce_dev, req_info, false); + + return rc; +} +EXPORT_SYMBOL(qce_aead_req); + +int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) +{ + int rc = 0; + struct qce_device *pce_dev = (struct qce_device *) handle; + struct skcipher_request *areq = (struct skcipher_request *) + c_req->areq; + struct qce_cmdlist_info *cmdlistinfo = NULL; + int req_info = -1; + struct ce_sps_data *pce_sps_data; + struct ce_request_info *preq_info; + + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + + preq_info->src_nents = 0; + preq_info->dst_nents = 0; + + /* cipher input */ + preq_info->src_nents = count_sg(areq->src, areq->cryptlen); + + qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + /* cipher output */ + if (areq->src != areq->dst) { + preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen); + qce_dma_map_sg(pce_dev->pdev, areq->dst, + preq_info->dst_nents, DMA_FROM_DEVICE); + } else { + preq_info->dst_nents = preq_info->src_nents; + } + preq_info->dir = c_req->dir; + if ((pce_dev->ce_bam_info.minor_version == 0) && + (preq_info->dir == QCE_DECRYPT) && + (c_req->mode == QCE_MODE_CBC)) { + memcpy(preq_info->dec_iv, (unsigned char *) + sg_virt(areq->src) + areq->src->length - 16, + NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE); + } + + /* set up crypto device */ + if (pce_dev->support_cmd_dscr) { + cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, + req_info, c_req); + if (cmdlistinfo == NULL) { + pr_err("Unsupported cipher algorithm %d, mode %d\n", + c_req->alg, c_req->mode); + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + rc = _ce_setup_cipher(pce_dev, c_req, areq->cryptlen, 0, + cmdlistinfo); + } else { + rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->cryptlen, 0); + } + if (rc < 0) + goto bad; + + preq_info->mode = c_req->mode; + + /* setup for client callback, and issue command to BAM */ + preq_info->areq = areq; + preq_info->qce_cb = c_req->qce_cb; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_CIPHERING; + preq_info->req_len = areq->cryptlen; + + _qce_sps_iovec_count_init(pce_dev, req_info); + if (pce_dev->support_cmd_dscr && cmdlistinfo) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen, + &pce_sps_data->in_transfer)) + goto bad; + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + if (pce_dev->no_get_around) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->cryptlen, + &pce_sps_data->out_transfer)) + goto bad; + if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) { + pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + if (_qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer)) + goto bad; + } else { + pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + } + + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + if 
(rc) + goto bad; + + return 0; +bad: + if (areq->src != areq->dst) { + if (preq_info->dst_nents) { + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, + preq_info->dst_nents, DMA_FROM_DEVICE); + } + } + if (preq_info->src_nents) { + qce_dma_unmap_sg(pce_dev->pdev, areq->src, + preq_info->src_nents, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + } + qce_free_req_info(pce_dev, req_info, false); + return rc; +} +EXPORT_SYMBOL(qce_ablk_cipher_req); + +int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + int rc; + + struct ahash_request *areq; + struct qce_cmdlist_info *cmdlistinfo = NULL; + int req_info = -1; + struct ce_sps_data *pce_sps_data; + struct ce_request_info *preq_info; + bool is_dummy = false; + + if (!sreq) { + sreq = &(pce_dev->dummyreq.sreq); + req_info = DUMMY_REQ_INDEX; + is_dummy = true; + } else { + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + } + + areq = (struct ahash_request *)sreq->areq; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + + preq_info->src_nents = count_sg(sreq->src, sreq->size); + qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents, + DMA_TO_DEVICE); + + if (pce_dev->support_cmd_dscr) { + cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq); + if (cmdlistinfo == NULL) { + pr_err("Unsupported hash algorithm %d\n", sreq->alg); + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo); + } else { + rc = _ce_setup_hash_direct(pce_dev, sreq); + } + if (rc < 0) + goto bad; + + preq_info->areq = areq; + preq_info->qce_cb = sreq->qce_cb; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_HASHING; + preq_info->req_len = sreq->size; + + _qce_sps_iovec_count_init(pce_dev, req_info); + + if (pce_dev->support_cmd_dscr && cmdlistinfo) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, + &pce_sps_data->in_transfer)) + goto bad; + + /* always ensure there is input data. 
ZLT does not work for bam-ndp */ + if (!areq->nbytes) + _qce_sps_add_data( + GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + pce_dev->ce_bam_info.ce_burst_size, + &pce_sps_data->in_transfer); + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + if (pce_dev->no_get_around) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer)) + goto bad; + + if (is_dummy) { + _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev, req_info); + } else { + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + } + if (rc) + goto bad; + return 0; +bad: + if (preq_info->src_nents) { + qce_dma_unmap_sg(pce_dev->pdev, sreq->src, + preq_info->src_nents, DMA_TO_DEVICE); + } + qce_free_req_info(pce_dev, req_info, false); + return rc; +} +EXPORT_SYMBOL(qce_process_sha_req); + +int qce_f8_req(void *handle, struct qce_f8_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + bool key_stream_mode; + dma_addr_t dst; + int rc; + struct qce_cmdlist_info *cmdlistinfo; + int req_info = -1; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g; + break; + default: + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + + key_stream_mode = (req->data_in == NULL); + + /* don't support key stream mode */ + + if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) { + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + + /* F8 cipher input */ + preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, + req->data_in, req->data_len, + (req->data_in == req->data_out) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + + /* F8 cipher output */ + if (req->data_in != req->data_out) { + dst = dma_map_single(pce_dev->pdev, req->data_out, + req->data_len, DMA_FROM_DEVICE); + preq_info->phy_ota_dst = dst; + } else { + /* in place ciphering */ + dst = preq_info->phy_ota_src; + preq_info->phy_ota_dst = 0; + } + preq_info->ota_size = req->data_len; + + + /* set up crypto device */ + if (pce_dev->support_cmd_dscr) + rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, + req->data_len, cmdlistinfo); + else + rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0, + req->data_len); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to sps */ + preq_info->areq = cookie; + preq_info->qce_cb = qce_cb; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_F8; + preq_info->req_len = req->data_len; + + _qce_sps_iovec_count_init(pce_dev, req_info); + + if (pce_dev->support_cmd_dscr) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + + _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len, + &pce_sps_data->in_transfer); + + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + _qce_sps_add_data((uint32_t)dst, req->data_len, + &pce_sps_data->out_transfer); + + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer); + + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + if (rc) + goto bad; + return 0; +bad: + if (preq_info->phy_ota_dst != 0) + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, + req->data_len, DMA_FROM_DEVICE); + if (preq_info->phy_ota_src != 0) + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + req->data_len, + (req->data_in == req->data_out) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + qce_free_req_info(pce_dev, req_info, false); + return rc; +} +EXPORT_SYMBOL(qce_f8_req); + +int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, + void *cookie, qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + uint16_t num_pkt = mreq->num_pkt; + uint16_t cipher_start = mreq->cipher_start; + uint16_t cipher_size = mreq->cipher_size; + struct qce_f8_req *req = &mreq->qce_f8_req; + uint32_t total; + dma_addr_t dst = 0; + int rc = 0; + struct qce_cmdlist_info *cmdlistinfo; + int req_info = -1; + struct ce_request_info *preq_info; + struct ce_sps_data *pce_sps_data; + + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g; + break; + default: + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + + total = num_pkt * req->data_len; + + /* F8 cipher input */ + preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, + req->data_in, total, + (req->data_in == req->data_out) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + + /* F8 cipher output */ + if (req->data_in != req->data_out) { + dst = dma_map_single(pce_dev->pdev, req->data_out, total, + DMA_FROM_DEVICE); + preq_info->phy_ota_dst = dst; + } else { + /* in place ciphering */ + dst = preq_info->phy_ota_src; + preq_info->phy_ota_dst = 0; + } + + preq_info->ota_size = total; + + /* set up crypto device */ + if (pce_dev->support_cmd_dscr) + rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start, + cipher_size, cmdlistinfo); + else + rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt, + cipher_start, cipher_size); + if (rc) + goto bad; + + /* setup for callback, and issue command to sps */ + preq_info->areq = cookie; + preq_info->qce_cb = qce_cb; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_F8; + preq_info->req_len = total; + + _qce_sps_iovec_count_init(pce_dev, req_info); + + if (pce_dev->support_cmd_dscr) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + + _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total, + &pce_sps_data->in_transfer); + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + _qce_sps_add_data((uint32_t)dst, total, + &pce_sps_data->out_transfer); + + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer); + + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + + if (rc == 0) + return 0; +bad: + if (preq_info->phy_ota_dst) + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total, + DMA_FROM_DEVICE); + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total, + (req->data_in == req->data_out) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + qce_free_req_info(pce_dev, req_info, false); + return rc; +} +EXPORT_SYMBOL(qce_f8_multi_pkt_req); + +int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, + qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + int rc; + struct qce_cmdlist_info *cmdlistinfo; + int req_info = -1; + struct ce_sps_data *pce_sps_data; + struct ce_request_info *preq_info; + + req_info = qce_alloc_req_info(pce_dev); + if (req_info < 0) + return -EBUSY; + preq_info = &pce_dev->ce_request_info[req_info]; + pce_sps_data = &preq_info->ce_sps; + switch (req->algorithm) { + case QCE_OTA_ALGO_KASUMI: + cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi; + break; + case QCE_OTA_ALGO_SNOW3G: + cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g; + break; + default: + qce_free_req_info(pce_dev, req_info, false); + return -EINVAL; + } + + preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, + req->msize, DMA_TO_DEVICE); + + preq_info->ota_size = req->msize; + + if (pce_dev->support_cmd_dscr) + rc = _ce_f9_setup(pce_dev, req, cmdlistinfo); + else + rc = _ce_f9_setup_direct(pce_dev, req); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to sps */ + preq_info->areq = cookie; + preq_info->qce_cb = qce_cb; + + /* setup xfer type for producer callback handling */ + preq_info->xfer_type = QCE_XFER_F9; + preq_info->req_len = req->msize; + + _qce_sps_iovec_count_init(pce_dev, req_info); + if (pce_dev->support_cmd_dscr) + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + &pce_sps_data->in_transfer); + _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize, + &pce_sps_data->in_transfer); + _qce_set_flag(&pce_sps_data->in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + + _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + &pce_sps_data->cmdlistptr.unlock_all_pipes, + &pce_sps_data->in_transfer); + + _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_sps_data->out_transfer); + + select_mode(pce_dev, preq_info); + rc = _qce_sps_transfer(pce_dev, req_info); + cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + if (rc) + goto bad; + return 0; +bad: + dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + req->msize, DMA_TO_DEVICE); + qce_free_req_info(pce_dev, req_info, false); + return rc; +} +EXPORT_SYMBOL(qce_f9_req); + +static int __qce_get_device_tree_data(struct platform_device *pdev, + struct qce_device *pce_dev) +{ + struct resource *resource; + int rc = 0; + + pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node, + "qcom,ce-hw-shared"); + pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node, + "qcom,ce-hw-key"); + + pce_dev->use_sw_aes_cbc_ecb_ctr_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-aes-cbc-ecb-ctr-algo"); + pce_dev->use_sw_aead_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-aead-algo"); + pce_dev->use_sw_aes_xts_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-aes-xts-algo"); + pce_dev->use_sw_ahash_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-ahash-algo"); + pce_dev->use_sw_hmac_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-hmac-algo"); + pce_dev->use_sw_aes_ccm_algo = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,use-sw-aes-ccm-algo"); + pce_dev->support_clk_mgmt_sus_res = of_property_read_bool( + (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res"); + 
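/* clock topology flags: core-src-clk-only mode and bw-vote-before-clk ordering */ +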
pce_dev->support_only_core_src_clk = of_property_read_bool( + (&pdev->dev)->of_node, "qcom,support-core-clk-only"); + pce_dev->request_bw_before_clk = of_property_read_bool( + (&pdev->dev)->of_node, "qcom,request-bw-before-clk"); + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,bam-pipe-pair", + &pce_dev->ce_bam_info.pipe_pair_index)) { + pr_err("Fail to get bam pipe pair information.\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,ce-device", + &pce_dev->ce_bam_info.ce_device)) { + pr_err("Fail to get CE device information.\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,ce-hw-instance", + &pce_dev->ce_bam_info.ce_hw_instance)) { + pr_err("Fail to get CE hw instance information.\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,bam-ee", + &pce_dev->ce_bam_info.bam_ee)) { + pr_info("BAM Apps EE is not defined, setting to default 1\n"); + pce_dev->ce_bam_info.bam_ee = 1; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,ce-opp-freq", + &pce_dev->ce_opp_freq_hz)) { + pr_info("CE operating frequency is not defined, setting to default 100MHZ\n"); + pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ; + } + + if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable")) + pce_dev->enable_s1_smmu = true; + + pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node, + "qcom,no-clock-support"); + + pce_dev->ce_bam_info.dest_pipe_index = + 2 * pce_dev->ce_bam_info.pipe_pair_index; + pce_dev->ce_bam_info.src_pipe_index = + pce_dev->ce_bam_info.dest_pipe_index + 1; + + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "crypto-base"); + if (resource) { + pce_dev->phy_iobase = resource->start; + pce_dev->iobase = ioremap(resource->start, + resource_size(resource)); + if (!pce_dev->iobase) { + pr_err("Can not map CRYPTO io memory\n"); + return -ENOMEM; + } + } else { + pr_err("CRYPTO HW mem unavailable.\n"); + return -ENODEV; + } + + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "crypto-bam-base"); + if (resource) { + pce_dev->bam_mem = resource->start; + pce_dev->bam_mem_size = resource_size(resource); + } else { + pr_err("CRYPTO BAM mem unavailable.\n"); + rc = -ENODEV; + goto err_getting_bam_info; + } + + resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (resource) { + pce_dev->ce_bam_info.bam_irq = resource->start; + } else { + pr_err("CRYPTO BAM IRQ unavailable.\n"); + goto err_dev; + } + return rc; +err_dev: + if (pce_dev->ce_bam_info.bam_iobase) + iounmap(pce_dev->ce_bam_info.bam_iobase); + +err_getting_bam_info: + if (pce_dev->iobase) + iounmap(pce_dev->iobase); + + return rc; +} + +static int __qce_init_clk(struct qce_device *pce_dev) +{ + int rc = 0; + + if (pce_dev->no_clock_support) { + pr_debug("No clock support defined in dts\n"); + return rc; + } + + pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src"); + if (!IS_ERR(pce_dev->ce_core_src_clk)) { + if (pce_dev->request_bw_before_clk) + goto skip_set_rate; + + rc = clk_set_rate(pce_dev->ce_core_src_clk, + pce_dev->ce_opp_freq_hz); + if (rc) { + pr_err("Unable to set the core src clk @%uMhz.\n", + pce_dev->ce_opp_freq_hz/CE_CLK_DIV); + goto exit_put_core_src_clk; + } + } else { + if (pce_dev->support_only_core_src_clk) { + rc = PTR_ERR(pce_dev->ce_core_src_clk); + pce_dev->ce_core_src_clk = NULL; + pr_err("Unable to get CE core src clk\n"); + return rc; + } + pr_warn("Unable to get CE core src clk, set to NULL\n"); + 
pce_dev->ce_core_src_clk = NULL; + } + +skip_set_rate: + if (pce_dev->support_only_core_src_clk) { + pce_dev->ce_core_clk = NULL; + pce_dev->ce_clk = NULL; + pce_dev->ce_bus_clk = NULL; + } else { + pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk"); + if (IS_ERR(pce_dev->ce_core_clk)) { + rc = PTR_ERR(pce_dev->ce_core_clk); + pr_err("Unable to get CE core clk\n"); + goto exit_put_core_src_clk; + } + pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk"); + if (IS_ERR(pce_dev->ce_clk)) { + rc = PTR_ERR(pce_dev->ce_clk); + pr_err("Unable to get CE interface clk\n"); + goto exit_put_core_clk; + } + + pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk"); + if (IS_ERR(pce_dev->ce_bus_clk)) { + rc = PTR_ERR(pce_dev->ce_bus_clk); + pr_err("Unable to get CE BUS interface clk\n"); + goto exit_put_iface_clk; + } + } + return rc; + +exit_put_iface_clk: + if (pce_dev->ce_clk) + clk_put(pce_dev->ce_clk); +exit_put_core_clk: + if (pce_dev->ce_core_clk) + clk_put(pce_dev->ce_core_clk); +exit_put_core_src_clk: + if (pce_dev->ce_core_src_clk) + clk_put(pce_dev->ce_core_src_clk); + pr_err("Unable to init CE clks, rc = %d\n", rc); + return rc; +} + +static void __qce_deinit_clk(struct qce_device *pce_dev) +{ + if (pce_dev->no_clock_support) { + pr_debug("No clock support defined in dts\n"); + return; + } + + if (pce_dev->ce_bus_clk) + clk_put(pce_dev->ce_bus_clk); + if (pce_dev->ce_clk) + clk_put(pce_dev->ce_clk); + if (pce_dev->ce_core_clk) + clk_put(pce_dev->ce_core_clk); + if (pce_dev->ce_core_src_clk) + clk_put(pce_dev->ce_core_src_clk); +} + +int qce_enable_clk(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *)handle; + int rc = 0; + + if (pce_dev->no_clock_support) { + pr_debug("No clock support defined in dts\n"); + return rc; + } + + if (pce_dev->ce_core_src_clk) { + rc = clk_prepare_enable(pce_dev->ce_core_src_clk); + if (rc) { + pr_err("Unable to enable/prepare CE core src clk\n"); + return rc; + } + } + + if (pce_dev->support_only_core_src_clk) + return rc; + + if (pce_dev->ce_core_clk) { + rc = clk_prepare_enable(pce_dev->ce_core_clk); + if (rc) { + pr_err("Unable to enable/prepare CE core clk\n"); + goto exit_disable_core_src_clk; + } + } + + if (pce_dev->ce_clk) { + rc = clk_prepare_enable(pce_dev->ce_clk); + if (rc) { + pr_err("Unable to enable/prepare CE iface clk\n"); + goto exit_disable_core_clk; + } + } + + if (pce_dev->ce_bus_clk) { + rc = clk_prepare_enable(pce_dev->ce_bus_clk); + if (rc) { + pr_err("Unable to enable/prepare CE BUS clk\n"); + goto exit_disable_ce_clk; + } + } + return rc; + +exit_disable_ce_clk: + if (pce_dev->ce_clk) + clk_disable_unprepare(pce_dev->ce_clk); +exit_disable_core_clk: + if (pce_dev->ce_core_clk) + clk_disable_unprepare(pce_dev->ce_core_clk); +exit_disable_core_src_clk: + if (pce_dev->ce_core_src_clk) + clk_disable_unprepare(pce_dev->ce_core_src_clk); + return rc; +} +EXPORT_SYMBOL(qce_enable_clk); + +int qce_disable_clk(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (pce_dev->no_clock_support) { + pr_debug("No clock support defined in dts\n"); + return 0; + } + + if (pce_dev->ce_bus_clk) + clk_disable_unprepare(pce_dev->ce_bus_clk); + if (pce_dev->ce_clk) + clk_disable_unprepare(pce_dev->ce_clk); + if (pce_dev->ce_core_clk) + clk_disable_unprepare(pce_dev->ce_core_clk); + if (pce_dev->ce_core_src_clk) + clk_disable_unprepare(pce_dev->ce_core_src_clk); + + return 0; +} +EXPORT_SYMBOL(qce_disable_clk); + +/* dummy req setup */ +static int setup_dummy_req(struct qce_device *pce_dev) +{ + char 
*input = + "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs"; + int len = DUMMY_REQ_DATA_LEN; + + memcpy(pce_dev->dummyreq_in_buf, input, len); + sg_init_one(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len); + + pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1; + pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete; + pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg; + pce_dev->dummyreq.sreq.auth_data[0] = 0; + pce_dev->dummyreq.sreq.auth_data[1] = 0; + pce_dev->dummyreq.sreq.auth_data[2] = 0; + pce_dev->dummyreq.sreq.auth_data[3] = 0; + pce_dev->dummyreq.sreq.first_blk = true; + pce_dev->dummyreq.sreq.last_blk = true; + pce_dev->dummyreq.sreq.size = len; + pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq; + pce_dev->dummyreq.sreq.flags = 0; + pce_dev->dummyreq.sreq.authkey = NULL; + + pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src; + pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size; + + return 0; +} + +static int qce_smmu_init(struct qce_device *pce_dev) +{ + struct device *dev = pce_dev->pdev; + + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, + sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64)); + return 0; +} + +/* crypto engine open function. */ +void *qce_open(struct platform_device *pdev, int *rc) +{ + struct qce_device *pce_dev; + int i; + static int pcedev_no = 1; + + pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); + if (!pce_dev) { + *rc = -ENOMEM; + pr_err("Can not allocate memory: %d\n", *rc); + return NULL; + } + pce_dev->pdev = &pdev->dev; + + mutex_lock(&qce_iomap_mutex); + if (pdev->dev.of_node) { + *rc = __qce_get_device_tree_data(pdev, pce_dev); + if (*rc) + goto err_pce_dev; + } else { + *rc = -EINVAL; + pr_err("Device Node not found.\n"); + goto err_pce_dev; + } + + if (pce_dev->enable_s1_smmu) { + if (qce_smmu_init(pce_dev)) { + *rc = -EIO; + goto err_pce_dev; + } + } + + for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) + atomic_set(&pce_dev->ce_request_info[i].in_use, false); + pce_dev->ce_request_index = 0; + + pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ; + pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, + pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL); + + if (pce_dev->coh_vmem == NULL) { + *rc = -ENOMEM; + pr_err("Can not allocate coherent memory for sps data\n"); + goto err_iobase; + } + + pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE * + MAX_QCE_ALLOC_BAM_REQ * 2; + pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL); + if (pce_dev->iovec_vmem == NULL) + goto err_mem; + + pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL); + if (pce_dev->dummyreq_in_buf == NULL) + goto err_mem; + + *rc = __qce_init_clk(pce_dev); + if (*rc) + goto err_mem; + *rc = qce_enable_clk(pce_dev); + if (*rc) + goto err_enable_clk; + + if (_probe_ce_engine(pce_dev)) { + *rc = -ENXIO; + goto err; + } + *rc = 0; + + qce_init_ce_cfg_val(pce_dev); + *rc = qce_sps_init(pce_dev); + if (*rc) + goto err; + qce_setup_ce_sps_data(pce_dev); + qce_disable_clk(pce_dev); + setup_dummy_req(pce_dev); + atomic_set(&pce_dev->no_of_queued_req, 0); + pce_dev->mode = IN_INTERRUPT_MODE; + timer_setup(&(pce_dev->timer), qce_multireq_timeout, 0); + //pce_dev->timer.function = qce_multireq_timeout; + //pce_dev->timer.data = (unsigned long)pce_dev; + pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES; + pce_dev->intr_cadence = 0; + pce_dev->dev_no = pcedev_no; + 
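/* each qce_open() instance gets a unique, monotonically increasing device number */ +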
pcedev_no++; + pce_dev->owner = QCE_OWNER_NONE; + mutex_unlock(&qce_iomap_mutex); + return pce_dev; +err: + qce_disable_clk(pce_dev); + +err_enable_clk: + __qce_deinit_clk(pce_dev); + +err_mem: + kfree(pce_dev->dummyreq_in_buf); + kfree(pce_dev->iovec_vmem); + if (pce_dev->coh_vmem) + dma_free_coherent(pce_dev->pdev, pce_dev->memsize, + pce_dev->coh_vmem, pce_dev->coh_pmem); +err_iobase: + if (pce_dev->iobase) + iounmap(pce_dev->iobase); +err_pce_dev: + mutex_unlock(&qce_iomap_mutex); + kfree(pce_dev); + return NULL; +} +EXPORT_SYMBOL(qce_open); + +/* crypto engine close function. */ +int qce_close(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (handle == NULL) + return -ENODEV; + + mutex_lock(&qce_iomap_mutex); + qce_enable_clk(pce_dev); + qce_sps_exit(pce_dev); + + if (pce_dev->iobase) + iounmap(pce_dev->iobase); + if (pce_dev->coh_vmem) + dma_free_coherent(pce_dev->pdev, pce_dev->memsize, + pce_dev->coh_vmem, pce_dev->coh_pmem); + kfree(pce_dev->dummyreq_in_buf); + kfree(pce_dev->iovec_vmem); + + qce_disable_clk(pce_dev); + __qce_deinit_clk(pce_dev); + mutex_unlock(&qce_iomap_mutex); + kfree(handle); + + return 0; +} +EXPORT_SYMBOL(qce_close); + +#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\ + 1 << CRYPTO_ENCR_KASUMI_SEL |\ + 1 << CRYPTO_AUTH_SNOW3G_SEL |\ + 1 << CRYPTO_AUTH_KASUMI_SEL) + +int qce_hw_support(void *handle, struct ce_hw_support *ce_support) +{ + struct qce_device *pce_dev = (struct qce_device *)handle; + + if (ce_support == NULL) + return -EINVAL; + + ce_support->sha1_hmac_20 = false; + ce_support->sha1_hmac = false; + ce_support->sha256_hmac = false; + ce_support->sha_hmac = true; + ce_support->cmac = true; + ce_support->aes_key_192 = false; + ce_support->aes_xts = true; + if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK) + ce_support->ota = true; + else + ce_support->ota = false; + ce_support->bam = true; + ce_support->is_shared = (pce_dev->is_shared == 1) ? 
true : false; + ce_support->hw_key = pce_dev->support_hw_key; + ce_support->aes_ccm = true; + ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res; + ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk; + if (pce_dev->ce_bam_info.minor_version) + ce_support->aligned_only = false; + else + ce_support->aligned_only = true; + + ce_support->use_sw_aes_cbc_ecb_ctr_algo = + pce_dev->use_sw_aes_cbc_ecb_ctr_algo; + ce_support->use_sw_aead_algo = + pce_dev->use_sw_aead_algo; + ce_support->use_sw_aes_xts_algo = + pce_dev->use_sw_aes_xts_algo; + ce_support->use_sw_ahash_algo = + pce_dev->use_sw_ahash_algo; + ce_support->use_sw_hmac_algo = + pce_dev->use_sw_hmac_algo; + ce_support->use_sw_aes_ccm_algo = + pce_dev->use_sw_aes_ccm_algo; + ce_support->ce_device = pce_dev->ce_bam_info.ce_device; + ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance; + if (pce_dev->no_get_around) + ce_support->max_request = MAX_QCE_BAM_REQ; + else + ce_support->max_request = 1; + return 0; +} +EXPORT_SYMBOL(qce_hw_support); + +void qce_dump_req(void *handle) +{ + int i; + bool req_in_use; + struct qce_device *pce_dev = (struct qce_device *)handle; + + for (i = 0; i < MAX_QCE_BAM_REQ; i++) { + req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use); + pr_info("%s: %d %d\n", __func__, i, req_in_use); + if (req_in_use) + _qce_dump_descr_fifos(pce_dev, i); + } +} +EXPORT_SYMBOL(qce_dump_req); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Crypto Engine driver"); diff --git a/crypto-qti/qce50.h b/crypto-qti/qce50.h new file mode 100644 index 0000000000..f1e9b6827d --- /dev/null +++ b/crypto-qti/qce50.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_ +#define _DRIVERS_CRYPTO_MSM_QCE50_H_ + +#include "linux/msm-sps.h" + +/* MAX Data xfer block size between BAM and CE */ +#define MAX_CE_BAM_BURST_SIZE 0x40 +#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE + +#define GET_VIRT_ADDR(x) \ + ((uintptr_t)pce_dev->coh_vmem + \ + ((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem)) +#define GET_PHYS_ADDR(x) \ + (phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \ + ((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem))) + +#define CRYPTO_REG_SIZE 4 +#define NUM_OF_CRYPTO_AUTH_IV_REG 16 +#define NUM_OF_CRYPTO_CNTR_IV_REG 4 +#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4 +#define CRYPTO_TOTAL_REGISTERS_DUMPED 26 +#define CRYPTO_RESULT_DUMP_SIZE \ + ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \ + QCEBAM_BURST_SIZE) + +/* QCE max number of descriptor in a descriptor list */ +#define QCE_MAX_NUM_DESC 128 +#define SPS_MAX_PKT_SIZE (32 * 1024 - 64) + +/* default bam ipc log level */ +#define QCE_BAM_DEFAULT_IPC_LOGLVL 2 + +/* State of consumer/producer Pipe */ +enum qce_pipe_st_enum { + QCE_PIPE_STATE_IDLE = 0, + QCE_PIPE_STATE_IN_PROG = 1, + QCE_PIPE_STATE_COMP = 2, + QCE_PIPE_STATE_LAST +}; + +enum qce_xfer_type_enum { + QCE_XFER_HASHING, + QCE_XFER_CIPHERING, + QCE_XFER_AEAD, + QCE_XFER_F8, + QCE_XFER_F9, + QCE_XFER_TYPE_LAST +}; + +struct qce_sps_ep_conn_data { + struct sps_pipe *pipe; + struct sps_connect connect; + struct sps_register_event event; +}; + +/* CE Result DUMP format*/ +struct ce_result_dump_format { + uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG]; + uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG]; + uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG]; + __be32 status; + __be32 status2; +}; + +struct qce_cmdlist_info { + + unsigned long cmdlist; + 
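/* each pointer below references a pre-built SPS command element for one CE register group */ +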
struct sps_command_element *crypto_cfg; + struct sps_command_element *encr_seg_cfg; + struct sps_command_element *encr_seg_size; + struct sps_command_element *encr_seg_start; + struct sps_command_element *encr_key; + struct sps_command_element *encr_xts_key; + struct sps_command_element *encr_cntr_iv; + struct sps_command_element *encr_ccm_cntr_iv; + struct sps_command_element *encr_mask; + struct sps_command_element *encr_xts_du_size; + + struct sps_command_element *auth_seg_cfg; + struct sps_command_element *auth_seg_size; + struct sps_command_element *auth_seg_start; + struct sps_command_element *auth_key; + struct sps_command_element *auth_iv; + struct sps_command_element *auth_nonce_info; + struct sps_command_element *auth_bytecount; + struct sps_command_element *seg_size; + struct sps_command_element *go_proc; + ptrdiff_t size; +}; + +struct qce_cmdlistptr_ops { + struct qce_cmdlist_info cipher_aes_128_cbc_ctr; + struct qce_cmdlist_info cipher_aes_256_cbc_ctr; + struct qce_cmdlist_info cipher_aes_128_ecb; + struct qce_cmdlist_info cipher_aes_256_ecb; + struct qce_cmdlist_info cipher_aes_128_xts; + struct qce_cmdlist_info cipher_aes_256_xts; + struct qce_cmdlist_info cipher_des_cbc; + struct qce_cmdlist_info cipher_des_ecb; + struct qce_cmdlist_info cipher_3des_cbc; + struct qce_cmdlist_info cipher_3des_ecb; + struct qce_cmdlist_info auth_sha1; + struct qce_cmdlist_info auth_sha256; + struct qce_cmdlist_info auth_sha1_hmac; + struct qce_cmdlist_info auth_sha256_hmac; + struct qce_cmdlist_info auth_aes_128_cmac; + struct qce_cmdlist_info auth_aes_256_cmac; + struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128; + struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256; + struct qce_cmdlist_info aead_hmac_sha1_cbc_des; + struct qce_cmdlist_info aead_hmac_sha1_cbc_3des; + struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128; + struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256; + struct qce_cmdlist_info aead_hmac_sha256_cbc_des; + struct qce_cmdlist_info aead_hmac_sha256_cbc_3des; + struct qce_cmdlist_info aead_aes_128_ccm; + struct qce_cmdlist_info aead_aes_256_ccm; + struct qce_cmdlist_info cipher_null; + struct qce_cmdlist_info f8_kasumi; + struct qce_cmdlist_info f8_snow3g; + struct qce_cmdlist_info f9_kasumi; + struct qce_cmdlist_info f9_snow3g; + struct qce_cmdlist_info unlock_all_pipes; +}; + +struct qce_ce_cfg_reg_setting { + uint32_t crypto_cfg_be; + uint32_t crypto_cfg_le; + + uint32_t encr_cfg_aes_cbc_128; + uint32_t encr_cfg_aes_cbc_256; + + uint32_t encr_cfg_aes_ecb_128; + uint32_t encr_cfg_aes_ecb_256; + + uint32_t encr_cfg_aes_xts_128; + uint32_t encr_cfg_aes_xts_256; + + uint32_t encr_cfg_aes_ctr_128; + uint32_t encr_cfg_aes_ctr_256; + + uint32_t encr_cfg_aes_ccm_128; + uint32_t encr_cfg_aes_ccm_256; + + uint32_t encr_cfg_des_cbc; + uint32_t encr_cfg_des_ecb; + + uint32_t encr_cfg_3des_cbc; + uint32_t encr_cfg_3des_ecb; + uint32_t encr_cfg_kasumi; + uint32_t encr_cfg_snow3g; + + uint32_t auth_cfg_cmac_128; + uint32_t auth_cfg_cmac_256; + + uint32_t auth_cfg_sha1; + uint32_t auth_cfg_sha256; + + uint32_t auth_cfg_hmac_sha1; + uint32_t auth_cfg_hmac_sha256; + + uint32_t auth_cfg_aes_ccm_128; + uint32_t auth_cfg_aes_ccm_256; + uint32_t auth_cfg_aead_sha1_hmac; + uint32_t auth_cfg_aead_sha256_hmac; + uint32_t auth_cfg_kasumi; + uint32_t auth_cfg_snow3g; +}; + +struct ce_bam_info { + uint32_t bam_irq; + uint32_t bam_mem; + void __iomem *bam_iobase; + uint32_t ce_device; + uint32_t ce_hw_instance; + uint32_t bam_ee; + unsigned int pipe_pair_index; + unsigned int src_pipe_index; + 
unsigned int dest_pipe_index; + unsigned long bam_handle; + int ce_burst_size; + uint32_t minor_version; + struct qce_sps_ep_conn_data producer; + struct qce_sps_ep_conn_data consumer; +}; + +/* SPS data structure with buffers, commandlists & command pointer lists */ +struct ce_sps_data { + enum qce_pipe_st_enum producer_state; /* Producer pipe state */ + int consumer_status; /* consumer pipe status */ + int producer_status; /* producer pipe status */ + struct sps_transfer in_transfer; + struct sps_transfer out_transfer; + struct qce_cmdlistptr_ops cmdlistptr; + uint32_t result_dump; /* result dump virtual address */ + uint32_t result_dump_null; + uint32_t result_dump_phy; /* result dump physical address (32 bits) */ + uint32_t result_dump_null_phy; + + uint32_t ignore_buffer; /* ignore buffer virtual address */ + struct ce_result_dump_format *result; /* pointer to result dump */ + struct ce_result_dump_format *result_null; +}; + +struct ce_request_info { + atomic_t in_use; + bool in_prog; + enum qce_xfer_type_enum xfer_type; + struct ce_sps_data ce_sps; + qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */ + void *user; + void *areq; + int assoc_nents; + struct scatterlist *asg; /* Formatted associated data sg */ + int src_nents; + int dst_nents; + dma_addr_t phy_iv_in; + unsigned char dec_iv[16]; + int dir; + enum qce_cipher_mode_enum mode; + dma_addr_t phy_ota_src; + dma_addr_t phy_ota_dst; + unsigned int ota_size; + unsigned int req_len; +}; + +struct qce_driver_stats { + int no_of_timeouts; + int no_of_dummy_reqs; + int current_mode; + int outstanding_reqs; +}; + +#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */ diff --git a/crypto-qti/qce_ota.h b/crypto-qti/qce_ota.h new file mode 100644 index 0000000000..81f2d80206 --- /dev/null +++ b/crypto-qti/qce_ota.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QTI Crypto Engine driver OTA API + * + * Copyright (c) 2010-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __CRYPTO_MSM_QCE_OTA_H +#define __CRYPTO_MSM_QCE_OTA_H + +#include +#include "linux/qcota.h" + + +int qce_f8_req(void *handle, struct qce_f8_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); +int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); +int qce_f9_req(void *handle, struct qce_f9_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); + +#endif /* __CRYPTO_MSM_QCE_OTA_H */ diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c new file mode 100644 index 0000000000..9280d500c6 --- /dev/null +++ b/crypto-qti/qcedev.c @@ -0,0 +1,2330 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI CE device driver. + * + * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/platform_data/qcom_crypto_device.h" +#include "linux/qcedev.h" +#include + +#include +#include "qcedevi.h" +#include "qce.h" +#include "qcedev_smmu.h" +#include "compat_qcedev.h" + +#include + +#define CACHE_LINE_SIZE 32 +#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE + +static uint8_t _std_init_vector_sha1_uint8[] = { + 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, + 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, + 0xC3, 0xD2, 0xE1, 0xF0 +}; +/* standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha256_uint8[] = { + 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, + 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, + 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, + 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 +}; + +static DEFINE_MUTEX(send_cmd_lock); +static DEFINE_MUTEX(qcedev_sent_bw_req); +static DEFINE_MUTEX(hash_access_lock); + +static dev_t qcedev_device_no; +static struct class *driver_class; +static struct device *class_dev; + +static const struct of_device_id qcedev_match[] = { + { .compatible = "qcom,qcedev"}, + { .compatible = "qcom,qcedev,context-bank"}, + {} +}; + +MODULE_DEVICE_TABLE(of, qcedev_match); + +static int qcedev_control_clocks(struct qcedev_control *podev, bool enable) +{ + unsigned int control_flag; + int ret = 0; + + if (podev->ce_support.req_bw_before_clk) { + if (enable) + control_flag = QCE_BW_REQUEST_FIRST; + else + control_flag = QCE_CLK_DISABLE_FIRST; + } else { + if (enable) + control_flag = QCE_CLK_ENABLE_FIRST; + else + control_flag = QCE_BW_REQUEST_RESET_FIRST; + } + + switch (control_flag) { + case QCE_CLK_ENABLE_FIRST: + ret = qce_enable_clk(podev->qce); + if (ret) { + pr_err("%s Unable enable clk\n", __func__); + return ret; + } + ret = icc_set_bw(podev->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) { + pr_err("%s Unable to set high bw\n", __func__); + ret = qce_disable_clk(podev->qce); + if (ret) + pr_err("%s Unable disable clk\n", __func__); + return ret; + } + break; + case QCE_BW_REQUEST_FIRST: + ret = icc_set_bw(podev->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) { + pr_err("%s Unable to set high bw\n", __func__); + return ret; + } + ret = qce_enable_clk(podev->qce); + if (ret) { + pr_err("%s Unable enable clk\n", __func__); + ret = icc_set_bw(podev->icc_path, 0, 0); + if (ret) + pr_err("%s Unable to set low bw\n", __func__); + return ret; + } + break; + case QCE_CLK_DISABLE_FIRST: + ret = qce_disable_clk(podev->qce); + if (ret) { + pr_err("%s Unable to disable clk\n", __func__); + return ret; + } + ret = icc_set_bw(podev->icc_path, 0, 0); + if (ret) { + pr_err("%s Unable to set low bw\n", __func__); + ret = qce_enable_clk(podev->qce); + if (ret) + pr_err("%s Unable enable clk\n", __func__); + return ret; + } + break; + case QCE_BW_REQUEST_RESET_FIRST: + ret = icc_set_bw(podev->icc_path, 0, 0); + if (ret) { + pr_err("%s Unable to set low bw\n", __func__); + return ret; + } + ret = qce_disable_clk(podev->qce); + if (ret) { + pr_err("%s Unable to disable clk\n", __func__); + ret = icc_set_bw(podev->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) + pr_err("%s Unable to set high bw\n", __func__); + return ret; + } + break; + default: + return -ENOENT; + } + + return 0; +} + +static void qcedev_ce_high_bw_req(struct qcedev_control *podev, + bool high_bw_req) +{ + int ret = 0; + + 
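/* bandwidth votes are reference counted: only the first vote enables clocks/bw, only the last releases them */ +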
mutex_lock(&qcedev_sent_bw_req); + if (high_bw_req) { + if (podev->high_bw_req_count == 0) { + ret = qcedev_control_clocks(podev, true); + if (ret) + goto exit_unlock_mutex; + } + podev->high_bw_req_count++; + } else { + if (podev->high_bw_req_count == 1) { + ret = qcedev_control_clocks(podev, false); + if (ret) + goto exit_unlock_mutex; + } + podev->high_bw_req_count--; + } + +exit_unlock_mutex: + mutex_unlock(&qcedev_sent_bw_req); +} + +#define QCEDEV_MAGIC 0x56434544 /* "qced" */ + +static int qcedev_open(struct inode *inode, struct file *file); +static int qcedev_release(struct inode *inode, struct file *file); +static int start_cipher_req(struct qcedev_control *podev); +static int start_sha_req(struct qcedev_control *podev); + +static const struct file_operations qcedev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qcedev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_qcedev_ioctl, +#endif + .open = qcedev_open, + .release = qcedev_release, +}; + +static struct qcedev_control qce_dev[] = { + { + .magic = QCEDEV_MAGIC, + }, +}; + +#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev) +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 1024 + +struct qcedev_stat { + u32 qcedev_dec_success; + u32 qcedev_dec_fail; + u32 qcedev_enc_success; + u32 qcedev_enc_fail; + u32 qcedev_sha_success; + u32 qcedev_sha_fail; +}; + +static struct qcedev_stat _qcedev_stat; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +static int _debug_qcedev; + +static struct qcedev_control *qcedev_minor_to_control(unsigned int n) +{ + int i; + + for (i = 0; i < MAX_QCE_DEVICE; i++) { + if (qce_dev[i].minor == n) + return &qce_dev[n]; + } + return NULL; +} + +static int qcedev_open(struct inode *inode, struct file *file) +{ + struct qcedev_handle *handle; + struct qcedev_control *podev; + + podev = qcedev_minor_to_control(MINOR(inode->i_rdev)); + if (podev == NULL) { + pr_err("%s: no such device %d\n", __func__, + MINOR(inode->i_rdev)); + return -ENOENT; + } + + handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL); + if (handle == NULL) + return -ENOMEM; + + handle->cntl = podev; + file->private_data = handle; + + mutex_init(&handle->registeredbufs.lock); + INIT_LIST_HEAD(&handle->registeredbufs.list); + return 0; +} + +static int qcedev_release(struct inode *inode, struct file *file) +{ + struct qcedev_control *podev; + struct qcedev_handle *handle; + + handle = file->private_data; + podev = handle->cntl; + if (podev != NULL && podev->magic != QCEDEV_MAGIC) { + pr_err("%s: invalid handle %pK\n", + __func__, podev); + } + + if (qcedev_unmap_all_buffers(handle)) + pr_err("%s: failed to unmap all ion buffers\n", __func__); + + kfree_sensitive(handle); + file->private_data = NULL; + return 0; +} + +static void req_done(unsigned long data) +{ + struct qcedev_control *podev = (struct qcedev_control *)data; + struct qcedev_async_req *areq; + unsigned long flags = 0; + struct qcedev_async_req *new_req = NULL; + int ret = 0; + + spin_lock_irqsave(&podev->lock, flags); + areq = podev->active_command; + podev->active_command = NULL; + +again: + if (!list_empty(&podev->ready_commands)) { + new_req = container_of(podev->ready_commands.next, + struct qcedev_async_req, list); + list_del(&new_req->list); + podev->active_command = new_req; + new_req->err = 0; + if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER) + ret = start_cipher_req(podev); + else + ret = start_sha_req(podev); + } + + spin_unlock_irqrestore(&podev->lock, flags); + + if (areq) + complete(&areq->complete); + + if 
(new_req && ret) { + complete(&new_req->complete); + spin_lock_irqsave(&podev->lock, flags); + podev->active_command = NULL; + areq = NULL; + ret = 0; + new_req = NULL; + goto again; + } +} + +void qcedev_sha_req_cb(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret) +{ + struct qcedev_sha_req *areq; + struct qcedev_control *pdev; + struct qcedev_handle *handle; + + uint32_t *auth32 = (uint32_t *)authdata; + + areq = (struct qcedev_sha_req *) cookie; + handle = (struct qcedev_handle *) areq->cookie; + pdev = handle->cntl; + + if (digest) + memcpy(&handle->sha_ctxt.digest[0], digest, 32); + + if (authdata) { + handle->sha_ctxt.auth_data[0] = auth32[0]; + handle->sha_ctxt.auth_data[1] = auth32[1]; + } + + tasklet_schedule(&pdev->done_tasklet); +}; + + +void qcedev_cipher_req_cb(void *cookie, unsigned char *icv, + unsigned char *iv, int ret) +{ + struct qcedev_cipher_req *areq; + struct qcedev_handle *handle; + struct qcedev_control *podev; + struct qcedev_async_req *qcedev_areq; + + areq = (struct qcedev_cipher_req *) cookie; + handle = (struct qcedev_handle *) areq->cookie; + podev = handle->cntl; + qcedev_areq = podev->active_command; + + if (iv) + memcpy(&qcedev_areq->cipher_op_req.iv[0], iv, + qcedev_areq->cipher_op_req.ivlen); + tasklet_schedule(&podev->done_tasklet); +}; + +static int start_cipher_req(struct qcedev_control *podev) +{ + struct qcedev_async_req *qcedev_areq; + struct qce_req creq; + int ret = 0; + + /* start the command on the podev->active_command */ + qcedev_areq = podev->active_command; + qcedev_areq->cipher_req.cookie = qcedev_areq->handle; + if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) { + pr_err("%s: Use of PMEM is not supported\n", __func__); + goto unsupported; + } + creq.pmem = NULL; + switch (qcedev_areq->cipher_op_req.alg) { + case QCEDEV_ALG_DES: + creq.alg = CIPHER_ALG_DES; + break; + case QCEDEV_ALG_3DES: + creq.alg = CIPHER_ALG_3DES; + break; + case QCEDEV_ALG_AES: + creq.alg = CIPHER_ALG_AES; + break; + default: + return -EINVAL; + } + + switch (qcedev_areq->cipher_op_req.mode) { + case QCEDEV_AES_MODE_CBC: + case QCEDEV_DES_MODE_CBC: + creq.mode = QCE_MODE_CBC; + break; + case QCEDEV_AES_MODE_ECB: + case QCEDEV_DES_MODE_ECB: + creq.mode = QCE_MODE_ECB; + break; + case QCEDEV_AES_MODE_CTR: + creq.mode = QCE_MODE_CTR; + break; + case QCEDEV_AES_MODE_XTS: + creq.mode = QCE_MODE_XTS; + break; + default: + return -EINVAL; + } + + if ((creq.alg == CIPHER_ALG_AES) && + (creq.mode == QCE_MODE_CTR)) { + creq.dir = QCE_ENCRYPT; + } else { + if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC) + creq.dir = QCE_ENCRYPT; + else + creq.dir = QCE_DECRYPT; + } + + creq.iv = &qcedev_areq->cipher_op_req.iv[0]; + creq.ivsize = qcedev_areq->cipher_op_req.ivlen; + + creq.enckey = &qcedev_areq->cipher_op_req.enckey[0]; + creq.encklen = qcedev_areq->cipher_op_req.encklen; + + creq.cryptlen = qcedev_areq->cipher_op_req.data_len; + + if (qcedev_areq->cipher_op_req.encklen == 0) { + if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY) + || (qcedev_areq->cipher_op_req.op == + QCEDEV_OPER_DEC_NO_KEY)) + creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY; + else { + int i; + + for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) { + if (qcedev_areq->cipher_op_req.enckey[i] != 0) + break; + } + + if ((podev->platform_support.hw_key_support == 1) && + (i == QCEDEV_MAX_KEY_SIZE)) + creq.op = QCE_REQ_ABLK_CIPHER; + else { + ret = -EINVAL; + goto unsupported; + } + } + } else { + creq.op = QCE_REQ_ABLK_CIPHER; + } + + creq.qce_cb = qcedev_cipher_req_cb; + creq.areq = 
(void *)&qcedev_areq->cipher_req; + creq.flags = 0; + ret = qce_ablk_cipher_req(podev->qce, &creq); +unsupported: + if (ret) + qcedev_areq->err = -ENXIO; + else + qcedev_areq->err = 0; + return ret; +}; + +static int start_sha_req(struct qcedev_control *podev) +{ + struct qcedev_async_req *qcedev_areq; + struct qce_sha_req sreq; + int ret = 0; + struct qcedev_handle *handle; + + /* start the command on the podev->active_command */ + qcedev_areq = podev->active_command; + handle = qcedev_areq->handle; + + switch (qcedev_areq->sha_op_req.alg) { + case QCEDEV_ALG_SHA1: + sreq.alg = QCE_HASH_SHA1; + break; + case QCEDEV_ALG_SHA256: + sreq.alg = QCE_HASH_SHA256; + break; + case QCEDEV_ALG_SHA1_HMAC: + if (podev->ce_support.sha_hmac) { + sreq.alg = QCE_HASH_SHA1_HMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE; + + } else { + sreq.alg = QCE_HASH_SHA1; + sreq.authkey = NULL; + } + break; + case QCEDEV_ALG_SHA256_HMAC: + if (podev->ce_support.sha_hmac) { + sreq.alg = QCE_HASH_SHA256_HMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE; + } else { + sreq.alg = QCE_HASH_SHA256; + sreq.authkey = NULL; + } + break; + case QCEDEV_ALG_AES_CMAC: + sreq.alg = QCE_HASH_AES_CMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + sreq.authklen = qcedev_areq->sha_op_req.authklen; + break; + default: + pr_err("Algorithm %d not supported, exiting\n", + qcedev_areq->sha_op_req.alg); + return -EINVAL; + } + + qcedev_areq->sha_req.cookie = handle; + + sreq.qce_cb = qcedev_sha_req_cb; + if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) { + sreq.auth_data[0] = handle->sha_ctxt.auth_data[0]; + sreq.auth_data[1] = handle->sha_ctxt.auth_data[1]; + sreq.auth_data[2] = handle->sha_ctxt.auth_data[2]; + sreq.auth_data[3] = handle->sha_ctxt.auth_data[3]; + sreq.digest = &handle->sha_ctxt.digest[0]; + sreq.first_blk = handle->sha_ctxt.first_blk; + sreq.last_blk = handle->sha_ctxt.last_blk; + } + sreq.size = qcedev_areq->sha_req.sreq.nbytes; + sreq.src = qcedev_areq->sha_req.sreq.src; + sreq.areq = (void *)&qcedev_areq->sha_req; + sreq.flags = 0; + + ret = qce_process_sha_req(podev->qce, &sreq); + + if (ret) + qcedev_areq->err = -ENXIO; + else + qcedev_areq->err = 0; + return ret; +}; + +static int submit_req(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + struct qcedev_control *podev; + unsigned long flags = 0; + int ret = 0; + struct qcedev_stat *pstat; + + qcedev_areq->err = 0; + podev = handle->cntl; + + spin_lock_irqsave(&podev->lock, flags); + + if (podev->active_command == NULL) { + podev->active_command = qcedev_areq; + if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) + ret = start_cipher_req(podev); + else + ret = start_sha_req(podev); + } else { + list_add_tail(&qcedev_areq->list, &podev->ready_commands); + } + + if (ret != 0) + podev->active_command = NULL; + + spin_unlock_irqrestore(&podev->lock, flags); + + if (ret == 0) + wait_for_completion(&qcedev_areq->complete); + + if (ret) + qcedev_areq->err = -EIO; + + pstat = &_qcedev_stat; + if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) { + switch (qcedev_areq->cipher_op_req.op) { + case QCEDEV_OPER_DEC: + if (qcedev_areq->err) + pstat->qcedev_dec_fail++; + else + pstat->qcedev_dec_success++; + break; + case QCEDEV_OPER_ENC: + if (qcedev_areq->err) + pstat->qcedev_enc_fail++; + else + pstat->qcedev_enc_success++; + break; + default: + break; + } + } else { + if (qcedev_areq->err) + pstat->qcedev_sha_fail++; + else + 
pstat->qcedev_sha_success++; + } + + return qcedev_areq->err; +} + +static int qcedev_sha_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt; + + memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt)); + sha_ctxt->first_blk = 1; + + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) { + memcpy(&sha_ctxt->digest[0], + &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); + sha_ctxt->diglen = SHA1_DIGEST_SIZE; + } else { + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) { + memcpy(&sha_ctxt->digest[0], + &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctxt->diglen = SHA256_DIGEST_SIZE; + } + } + sha_ctxt->init_done = true; + return 0; +} + + +static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + int err = 0; + int i = 0; + uint32_t total; + + uint8_t *user_src = NULL; + uint8_t *k_src = NULL; + uint8_t *k_buf_src = NULL; + uint8_t *k_align_src = NULL; + + uint32_t sha_pad_len = 0; + uint32_t trailing_buf_len = 0; + uint32_t t_buf = handle->sha_ctxt.trailing_buf_len; + uint32_t sha_block_size; + + total = qcedev_areq->sha_op_req.data_len + t_buf; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1) + sha_block_size = SHA1_BLOCK_SIZE; + else + sha_block_size = SHA256_BLOCK_SIZE; + + if (total <= sha_block_size) { + uint32_t len = qcedev_areq->sha_op_req.data_len; + + i = 0; + + k_src = &handle->sha_ctxt.trailing_buf[t_buf]; + + /* Copy data from user src(s) */ + while (len > 0) { + user_src = qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) + return -EFAULT; + + len -= qcedev_areq->sha_op_req.data[i].len; + k_src += qcedev_areq->sha_op_req.data[i].len; + i++; + } + handle->sha_ctxt.trailing_buf_len = total; + + return 0; + } + + + k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, + GFP_KERNEL); + if (k_buf_src == NULL) + return -ENOMEM; + + k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src), + CACHE_LINE_SIZE); + k_src = k_align_src; + + /* check for trailing buffer from previous updates and append it */ + if (t_buf > 0) { + memcpy(k_src, &handle->sha_ctxt.trailing_buf[0], + t_buf); + k_src += t_buf; + } + + /* Copy data from user src(s) */ + user_src = qcedev_areq->sha_op_req.data[0].vaddr; + if (user_src && copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[0].len)) { + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[0].len; + for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) { + user_src = qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) { + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[i].len; + } + + /* get new trailing buffer */ + sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total; + trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len; + + qcedev_areq->sha_req.sreq.src = sg_src; + sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, + total-trailing_buf_len); + + qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len; + + /* update sha_ctxt trailing buf content to new trailing buf */ + if (trailing_buf_len > 0) { + 
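/* trailing_buf holds at most one SHA block (64 bytes); clear it before saving the new tail */ +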
memset(&handle->sha_ctxt.trailing_buf[0], 0, 64); + memcpy(&handle->sha_ctxt.trailing_buf[0], + (k_src - trailing_buf_len), + trailing_buf_len); + } + handle->sha_ctxt.trailing_buf_len = trailing_buf_len; + + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.first_blk = 0; + + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return err; +} + +static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + int err = 0; + int i = 0; + int j = 0; + int k = 0; + int num_entries = 0; + uint32_t total = 0; + + if (!handle->sha_ctxt.init_done) { + pr_err("%s Init was not called\n", __func__); + return -EINVAL; + } + + if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) { + + struct qcedev_sha_op_req *saved_req; + struct qcedev_sha_op_req req; + struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req; + + /* save the original req structure */ + saved_req = + kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL); + if (saved_req == NULL) { + pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n", + __func__, (uintptr_t)saved_req); + return -ENOMEM; + } + memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req)); + memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req)); + + i = 0; + /* Address 32 KB at a time */ + while ((i < req.entries) && (err == 0)) { + if (sreq->data[i].len > QCE_MAX_OPER_DATA) { + sreq->data[0].len = QCE_MAX_OPER_DATA; + if (i > 0) { + sreq->data[0].vaddr = + sreq->data[i].vaddr; + } + + sreq->data_len = QCE_MAX_OPER_DATA; + sreq->entries = 1; + + err = qcedev_sha_update_max_xfer(qcedev_areq, + handle, sg_src); + + sreq->data[i].len = req.data[i].len - + QCE_MAX_OPER_DATA; + sreq->data[i].vaddr = req.data[i].vaddr + + QCE_MAX_OPER_DATA; + req.data[i].vaddr = sreq->data[i].vaddr; + req.data[i].len = sreq->data[i].len; + } else { + total = 0; + for (j = i; j < req.entries; j++) { + num_entries++; + if ((total + sreq->data[j].len) >= + QCE_MAX_OPER_DATA) { + sreq->data[j].len = + (QCE_MAX_OPER_DATA - total); + total = QCE_MAX_OPER_DATA; + break; + } + total += sreq->data[j].len; + } + + sreq->data_len = total; + if (i > 0) + for (k = 0; k < num_entries; k++) { + sreq->data[k].len = + sreq->data[i+k].len; + sreq->data[k].vaddr = + sreq->data[i+k].vaddr; + } + sreq->entries = num_entries; + + i = j; + err = qcedev_sha_update_max_xfer(qcedev_areq, + handle, sg_src); + num_entries = 0; + + sreq->data[i].vaddr = req.data[i].vaddr + + sreq->data[i].len; + sreq->data[i].len = req.data[i].len - + sreq->data[i].len; + req.data[i].vaddr = sreq->data[i].vaddr; + req.data[i].len = sreq->data[i].len; + + if (sreq->data[i].len == 0) + i++; + } + } /* end of while ((i < req.entries) && (err == 0)) */ + + /* Restore the original req structure */ + for (i = 0; i < saved_req->entries; i++) { + sreq->data[i].len = saved_req->data[i].len; + sreq->data[i].vaddr = saved_req->data[i].vaddr; + } + sreq->entries = saved_req->entries; + sreq->data_len = saved_req->data_len; + memset(saved_req, 0, ksize((void *)saved_req)); + kfree(saved_req); + } else + err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src); + + return err; +} + +static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + struct scatterlist sg_src; + uint32_t total; + uint8_t *k_buf_src = NULL; + uint8_t *k_align_src = NULL; + + if (!handle->sha_ctxt.init_done) { + pr_err("%s Init was not called\n", __func__); + return -EINVAL; + } + + 
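+	/* Mark this as the final block so the CE applies SHA padding to the
+	 * buffered trailing bytes and emits the completed digest.
+	 */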
handle->sha_ctxt.last_blk = 1; + + total = handle->sha_ctxt.trailing_buf_len; + + k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, + GFP_KERNEL); + if (k_buf_src == NULL) + return -ENOMEM; + + k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src), + CACHE_LINE_SIZE); + memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total); + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src; + + sg_init_one(qcedev_areq->sha_req.sreq.src, k_align_src, total); + + qcedev_areq->sha_req.sreq.nbytes = total; + + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.first_blk = 0; + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.auth_data[0] = 0; + handle->sha_ctxt.auth_data[1] = 0; + handle->sha_ctxt.trailing_buf_len = 0; + handle->sha_ctxt.init_done = false; + memset(&handle->sha_ctxt.trailing_buf[0], 0, 64); + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + qcedev_areq->sha_req.sreq.src = NULL; + return err; +} + +static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + int err = 0; + int i = 0; + uint32_t total; + + uint8_t *user_src = NULL; + uint8_t *k_src = NULL; + uint8_t *k_buf_src = NULL; + + total = qcedev_areq->sha_op_req.data_len; + + if ((qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_128) && + (qcedev_areq->sha_op_req.authklen != QCEDEV_AES_KEY_256)) { + pr_err("%s: unsupported key length\n", __func__); + return -EINVAL; + } + + if (copy_from_user(&handle->sha_ctxt.authkey[0], + (void __user *)qcedev_areq->sha_op_req.authkey, + qcedev_areq->sha_op_req.authklen)) + return -EFAULT; + + if (total > U32_MAX - CACHE_LINE_SIZE * 2) + return -EINVAL; + + k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL); + if (k_buf_src == NULL) + return -ENOMEM; + + k_src = k_buf_src; + + /* Copy data from user src(s) */ + user_src = qcedev_areq->sha_op_req.data[0].vaddr; + for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) { + user_src = qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && copy_from_user(k_src, (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) { + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[i].len; + } + + qcedev_areq->sha_req.sreq.src = sg_src; + sg_init_one(qcedev_areq->sha_req.sreq.src, k_buf_src, total); + + qcedev_areq->sha_req.sreq.nbytes = total; + handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen; + err = submit_req(qcedev_areq, handle); + + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return err; +} + +static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + int err = 0; + + if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) { + qcedev_sha_init(areq, handle); + if (copy_from_user(&handle->sha_ctxt.authkey[0], + (void __user *)areq->sha_op_req.authkey, + areq->sha_op_req.authklen)) + return -EFAULT; + } else { + struct qcedev_async_req authkey_areq; + uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE]; + + init_completion(&authkey_areq.complete); + + authkey_areq.sha_op_req.entries = 1; + authkey_areq.sha_op_req.data[0].vaddr = + areq->sha_op_req.authkey; + authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen; + authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen; + authkey_areq.sha_op_req.diglen = 0; + authkey_areq.handle = handle; + + memset(&authkey_areq.sha_op_req.digest[0], 0, + QCEDEV_MAX_SHA_DIGEST); + if 
(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) + authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1; + if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) + authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256; + + authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + + qcedev_sha_init(&authkey_areq, handle); + err = qcedev_sha_update(&authkey_areq, handle, sg_src); + if (!err) + err = qcedev_sha_final(&authkey_areq, handle); + else + return err; + memcpy(&authkey[0], &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + qcedev_sha_init(areq, handle); + + memcpy(&handle->sha_ctxt.authkey[0], &authkey[0], + handle->sha_ctxt.diglen); + } + return err; +} + +static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + struct scatterlist sg_src; + uint8_t *k_src = NULL; + uint32_t sha_block_size = 0; + uint32_t sha_digest_size = 0; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) { + sha_digest_size = SHA1_DIGEST_SIZE; + sha_block_size = SHA1_BLOCK_SIZE; + } else { + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) { + sha_digest_size = SHA256_DIGEST_SIZE; + sha_block_size = SHA256_BLOCK_SIZE; + } + } + k_src = kmalloc(sha_block_size, GFP_KERNEL); + if (k_src == NULL) + return -ENOMEM; + + /* check for trailing buffer from previous updates and append it */ + memcpy(k_src, &handle->sha_ctxt.trailing_buf[0], + handle->sha_ctxt.trailing_buf_len); + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src; + sg_init_one(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size); + + qcedev_areq->sha_req.sreq.nbytes = sha_block_size; + memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size); + memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0], + sha_digest_size); + handle->sha_ctxt.trailing_buf_len = sha_digest_size; + + handle->sha_ctxt.first_blk = 1; + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.auth_data[0] = 0; + handle->sha_ctxt.auth_data[1] = 0; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) { + memcpy(&handle->sha_ctxt.digest[0], + &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); + handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE; + } + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) { + memcpy(&handle->sha_ctxt.digest[0], + &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); + handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE; + } + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.first_blk = 0; + memset(k_src, 0, ksize((void *)k_src)); + kfree(k_src); + qcedev_areq->sha_req.sreq.src = NULL; + return err; +} + +static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq, + struct qcedev_handle *handle, bool ikey) +{ + int i; + uint32_t constant; + uint32_t sha_block_size; + + if (ikey) + constant = 0x36; + else + constant = 0x5c; + + if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) + sha_block_size = SHA1_BLOCK_SIZE; + else + sha_block_size = SHA256_BLOCK_SIZE; + + memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size); + for (i = 0; i < sha_block_size; i++) + handle->sha_ctxt.trailing_buf[i] = + (handle->sha_ctxt.authkey[i] ^ constant); + + handle->sha_ctxt.trailing_buf_len = sha_block_size; + return 0; +} + +static int qcedev_hmac_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + int err; + struct qcedev_control *podev = handle->cntl; + + err = qcedev_set_hmac_auth_key(areq, handle, sg_src); + if (err) + return err; + if (!podev->ce_support.sha_hmac) + 
qcedev_hmac_update_iokey(areq, handle, true); + return 0; +} + +static int qcedev_hmac_final(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int err; + struct qcedev_control *podev = handle->cntl; + + err = qcedev_sha_final(areq, handle); + if (podev->ce_support.sha_hmac) + return err; + + qcedev_hmac_update_iokey(areq, handle, false); + err = qcedev_hmac_get_ohash(areq, handle); + if (err) + return err; + err = qcedev_sha_final(areq, handle); + + return err; +} + +static int qcedev_hash_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256)) + return qcedev_sha_init(areq, handle); + else + return qcedev_hmac_init(areq, handle, sg_src); +} + +static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle, + struct scatterlist *sg_src) +{ + return qcedev_sha_update(qcedev_areq, handle, sg_src); +} + +static int qcedev_hash_final(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256)) + return qcedev_sha_final(areq, handle); + else + return qcedev_hmac_final(areq, handle); +} + +static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq, + int *di, struct qcedev_handle *handle, + uint8_t *k_align_src) +{ + int err = 0; + int i = 0; + int dst_i = *di; + struct scatterlist sg_src; + uint32_t byteoffset = 0; + uint8_t *user_src = NULL; + uint8_t *k_align_dst = k_align_src; + struct qcedev_cipher_op_req *creq = &areq->cipher_op_req; + + + if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR) + byteoffset = areq->cipher_op_req.byteoffset; + + user_src = areq->cipher_op_req.vbuf.src[0].vaddr; + if (user_src && copy_from_user((k_align_src + byteoffset), + (void __user *)user_src, + areq->cipher_op_req.vbuf.src[0].len)) + return -EFAULT; + + k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len; + + for (i = 1; i < areq->cipher_op_req.entries; i++) { + user_src = areq->cipher_op_req.vbuf.src[i].vaddr; + if (user_src && copy_from_user(k_align_src, + (void __user *)user_src, + areq->cipher_op_req.vbuf.src[i].len)) { + return -EFAULT; + } + k_align_src += areq->cipher_op_req.vbuf.src[i].len; + } + + /* restore src beginning */ + k_align_src = k_align_dst; + areq->cipher_op_req.data_len += byteoffset; + + areq->cipher_req.creq.src = (struct scatterlist *) &sg_src; + areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src; + + /* In place encryption/decryption */ + sg_init_one(areq->cipher_req.creq.src, + k_align_dst, + areq->cipher_op_req.data_len); + + areq->cipher_req.creq.cryptlen = areq->cipher_op_req.data_len; + areq->cipher_req.creq.iv = areq->cipher_op_req.iv; + areq->cipher_op_req.entries = 1; + + err = submit_req(areq, handle); + + /* copy data to destination buffer*/ + creq->data_len -= byteoffset; + + while (creq->data_len > 0) { + if (creq->vbuf.dst[dst_i].len <= creq->data_len) { + if (err == 0 && copy_to_user( + (void __user *)creq->vbuf.dst[dst_i].vaddr, + (k_align_dst + byteoffset), + creq->vbuf.dst[dst_i].len)) { + err = -EFAULT; + goto exit; + } + + k_align_dst += creq->vbuf.dst[dst_i].len; + creq->data_len -= creq->vbuf.dst[dst_i].len; + dst_i++; + } else { + if (err == 0 && copy_to_user( + (void __user *)creq->vbuf.dst[dst_i].vaddr, + (k_align_dst + byteoffset), + creq->data_len)) { + err = -EFAULT; + goto exit; + } + + k_align_dst += creq->data_len; + 
creq->vbuf.dst[dst_i].len -= creq->data_len; + creq->vbuf.dst[dst_i].vaddr += creq->data_len; + creq->data_len = 0; + } + } + *di = dst_i; +exit: + areq->cipher_req.creq.src = NULL; + areq->cipher_req.creq.dst = NULL; + return err; +}; + +static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int err = 0; + int di = 0; + int i = 0; + int j = 0; + int k = 0; + uint32_t byteoffset = 0; + int num_entries = 0; + uint32_t total = 0; + uint32_t len; + uint8_t *k_buf_src = NULL; + uint8_t *k_align_src = NULL; + uint32_t max_data_xfer; + struct qcedev_cipher_op_req *saved_req; + struct qcedev_cipher_op_req *creq = &areq->cipher_op_req; + + total = 0; + + if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR) + byteoffset = areq->cipher_op_req.byteoffset; + k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2, + GFP_KERNEL); + if (k_buf_src == NULL) + return -ENOMEM; + k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src), + CACHE_LINE_SIZE); + max_data_xfer = QCE_MAX_OPER_DATA - byteoffset; + + saved_req = kmemdup(creq, sizeof(struct qcedev_cipher_op_req), + GFP_KERNEL); + if (saved_req == NULL) { + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(k_buf_src); + return -ENOMEM; + + } + + if (areq->cipher_op_req.data_len > max_data_xfer) { + struct qcedev_cipher_op_req req; + + /* save the original req structure */ + memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req)); + + i = 0; + /* Address 32 KB at a time */ + while ((i < req.entries) && (err == 0)) { + if (creq->vbuf.src[i].len > max_data_xfer) { + creq->vbuf.src[0].len = max_data_xfer; + if (i > 0) { + creq->vbuf.src[0].vaddr = + creq->vbuf.src[i].vaddr; + } + + creq->data_len = max_data_xfer; + creq->entries = 1; + + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, + &di, handle, k_align_src); + if (err < 0) { + memset(saved_req, 0, + ksize((void *)saved_req)); + memset(k_buf_src, 0, + ksize((void *)k_buf_src)); + kfree(k_buf_src); + kfree(saved_req); + return err; + } + + creq->vbuf.src[i].len = req.vbuf.src[i].len - + max_data_xfer; + creq->vbuf.src[i].vaddr = + req.vbuf.src[i].vaddr + + max_data_xfer; + req.vbuf.src[i].vaddr = + creq->vbuf.src[i].vaddr; + req.vbuf.src[i].len = creq->vbuf.src[i].len; + + } else { + total = areq->cipher_op_req.byteoffset; + for (j = i; j < req.entries; j++) { + num_entries++; + if ((total + creq->vbuf.src[j].len) + >= max_data_xfer) { + creq->vbuf.src[j].len = + max_data_xfer - total; + total = max_data_xfer; + break; + } + total += creq->vbuf.src[j].len; + } + + creq->data_len = total; + if (i > 0) + for (k = 0; k < num_entries; k++) { + creq->vbuf.src[k].len = + creq->vbuf.src[i+k].len; + creq->vbuf.src[k].vaddr = + creq->vbuf.src[i+k].vaddr; + } + creq->entries = num_entries; + + i = j; + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, + &di, handle, k_align_src); + if (err < 0) { + memset(saved_req, 0, + ksize((void *)saved_req)); + memset(k_buf_src, 0, + ksize((void *)k_buf_src)); + kfree(k_buf_src); + kfree(saved_req); + return err; + } + + num_entries = 0; + areq->cipher_op_req.byteoffset = 0; + + creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr + + creq->vbuf.src[i].len; + creq->vbuf.src[i].len = req.vbuf.src[i].len - + creq->vbuf.src[i].len; + + req.vbuf.src[i].vaddr = + creq->vbuf.src[i].vaddr; + req.vbuf.src[i].len = creq->vbuf.src[i].len; + + if (creq->vbuf.src[i].len == 0) + i++; + } + + areq->cipher_op_req.byteoffset = 0; + max_data_xfer = QCE_MAX_OPER_DATA; + byteoffset = 0; + + } /* end of while ((i < req.entries) && (err == 0)) */ + } 
else + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle, + k_align_src); + + /* Restore the original req structure */ + for (i = 0; i < saved_req->entries; i++) { + creq->vbuf.src[i].len = saved_req->vbuf.src[i].len; + creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr; + } + for (len = 0, i = 0; len < saved_req->data_len; i++) { + creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len; + creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr; + len += saved_req->vbuf.dst[i].len; + } + creq->entries = saved_req->entries; + creq->data_len = saved_req->data_len; + creq->byteoffset = saved_req->byteoffset; + + memset(saved_req, 0, ksize((void *)saved_req)); + memset(k_buf_src, 0, ksize((void *)k_buf_src)); + kfree(saved_req); + kfree(k_buf_src); + return err; + +} + +static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req, + struct qcedev_control *podev) +{ + /* if intending to use HW key make sure key fields are set + * correctly and HW key is indeed supported in target + */ + if (req->encklen == 0) { + int i; + + for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) { + if (req->enckey[i]) { + pr_err("%s: Invalid key: non-zero key input\n", + __func__); + goto error; + } + } + if ((req->op != QCEDEV_OPER_ENC_NO_KEY) && + (req->op != QCEDEV_OPER_DEC_NO_KEY)) + if (!podev->platform_support.hw_key_support) { + pr_err("%s: Invalid op %d\n", __func__, + (uint32_t)req->op); + goto error; + } + } else { + if (req->encklen == QCEDEV_AES_KEY_192) { + if (!podev->ce_support.aes_key_192) { + pr_err("%s: AES-192 not supported\n", __func__); + goto error; + } + } else { + /* if not using HW key make sure key + * length is valid + */ + if (req->mode == QCEDEV_AES_MODE_XTS) { + if ((req->encklen != QCEDEV_AES_KEY_128*2) && + (req->encklen != QCEDEV_AES_KEY_256*2)) { + pr_err("%s: unsupported key size: %d\n", + __func__, req->encklen); + goto error; + } + } else { + if ((req->encklen != QCEDEV_AES_KEY_128) && + (req->encklen != QCEDEV_AES_KEY_256)) { + pr_err("%s: unsupported key size %d\n", + __func__, req->encklen); + goto error; + } + } + } + } + return 0; +error: + return -EINVAL; +} + +static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req, + struct qcedev_control *podev) +{ + uint32_t total = 0; + uint32_t i; + + if (req->use_pmem) { + pr_err("%s: Use of PMEM is not supported\n", __func__); + goto error; + } + if ((req->entries == 0) || (req->data_len == 0) || + (req->entries > QCEDEV_MAX_BUFFERS)) { + pr_err("%s: Invalid cipher length/entries\n", __func__); + goto error; + } + if ((req->alg >= QCEDEV_ALG_LAST) || + (req->mode >= QCEDEV_AES_DES_MODE_LAST)) { + pr_err("%s: Invalid algorithm %d\n", __func__, + (uint32_t)req->alg); + goto error; + } + if ((req->mode == QCEDEV_AES_MODE_XTS) && + (!podev->ce_support.aes_xts)) { + pr_err("%s: XTS algorithm is not supported\n", __func__); + goto error; + } + if (req->alg == QCEDEV_ALG_AES) { + if (qcedev_check_cipher_key(req, podev)) + goto error; + + } + /* if using a byteoffset, make sure it is CTR mode using vbuf */ + if (req->byteoffset) { + if (req->mode != QCEDEV_AES_MODE_CTR) { + pr_err("%s: Operation on byte offset not supported\n", + __func__); + goto error; + } + if (req->byteoffset >= AES_CE_BLOCK_SIZE) { + pr_err("%s: Invalid byte offset\n", __func__); + goto error; + } + total = req->byteoffset; + for (i = 0; i < req->entries; i++) { + if (total > U32_MAX - req->vbuf.src[i].len) { + pr_err("%s:Integer overflow on total src len\n", + __func__); + goto error; + } + total += req->vbuf.src[i].len; + } + } + + if 
(req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+			__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen > 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+			(req->mode == QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a zero length IV\n", __func__);
+			goto error;
+		}
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+			(req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero length IV\n", __func__);
+			goto error;
+		}
+	}
+	/* Check that the sum of all dst lengths equals data_len */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req dst vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+			__func__, i, total, req->data_len);
+		goto error;
+	}
+	/* Check that the sum of all src lengths equals data_len */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req src vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto error;
+	}
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+				struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+			(!podev->ce_support.cmac)) {
+		pr_err("%s: CMAC not supported\n", __func__);
+		goto sha_error;
+	}
+	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid num entries (%d)\n",
+			__func__, req->entries);
+		goto sha_error;
+	}
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+		goto sha_error;
+	}
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+		if (req->authkey == NULL) {
+			pr_err("%s: Invalid authkey pointer\n", __func__);
+			goto sha_error;
+		}
+		if (req->authklen <= 0) {
+			pr_err("%s: Invalid authkey length (%d)\n",
+				__func__, req->authklen);
+			goto sha_error;
+		}
+	}
+
+	if (req->alg == QCEDEV_ALG_AES_CMAC) {
+		if ((req->authklen != QCEDEV_AES_KEY_128) &&
+			(req->authklen != QCEDEV_AES_KEY_256)) {
+			pr_err("%s: unsupported key length\n", __func__);
+			goto sha_error;
+		}
+	}
+
+	/* Check that the sum of all src lengths equals data_len */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (req->data[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req buf length\n",
+				__func__);
+			goto sha_error;
+		}
+		total += req->data[i].len;
+	}
+
+	if (total != req->data_len) {
+
pr_err("%s: Total src(%d) buf size != data_len (%d)\n", + __func__, total, req->data_len); + goto sha_error; + } + return 0; +sha_error: + return -EINVAL; +} + +long qcedev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int err = 0; + struct qcedev_handle *handle; + struct qcedev_control *podev; + struct qcedev_async_req *qcedev_areq; + struct qcedev_stat *pstat; + + qcedev_areq = kzalloc(sizeof(struct qcedev_async_req), GFP_KERNEL); + if (!qcedev_areq) + return -ENOMEM; + + handle = file->private_data; + podev = handle->cntl; + qcedev_areq->handle = handle; + if (podev == NULL || podev->magic != QCEDEV_MAGIC) { + pr_err("%s: invalid handle %pK\n", + __func__, podev); + err = -ENOENT; + goto exit_free_qcedev_areq; + } + + /* Verify user arguments. */ + if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) { + err = -ENOTTY; + goto exit_free_qcedev_areq; + } + + init_completion(&qcedev_areq->complete); + pstat = &_qcedev_stat; + + if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ && + cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ) + qcedev_ce_high_bw_req(podev, true); + + switch (cmd) { + case QCEDEV_IOCTL_ENC_REQ: + case QCEDEV_IOCTL_DEC_REQ: + if (copy_from_user(&qcedev_areq->cipher_op_req, + (void __user *)arg, + sizeof(struct qcedev_cipher_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_CIPHER; + + if (qcedev_check_cipher_params(&qcedev_areq->cipher_op_req, + podev)) { + err = -EINVAL; + goto exit_free_qcedev_areq; + } + + err = qcedev_vbuf_ablk_cipher(qcedev_areq, handle); + if (err) + goto exit_free_qcedev_areq; + if (copy_to_user((void __user *)arg, + &qcedev_areq->cipher_op_req, + sizeof(struct qcedev_cipher_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + break; + + case QCEDEV_IOCTL_SHA_INIT_REQ: + { + struct scatterlist sg_src; + + if (copy_from_user(&qcedev_areq->sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA; + err = qcedev_hash_init(qcedev_areq, handle, &sg_src); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + mutex_unlock(&hash_access_lock); + if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + handle->sha_ctxt.init_done = true; + } + break; + case QCEDEV_IOCTL_GET_CMAC_REQ: + if (!podev->ce_support.cmac) { + err = -ENOTTY; + goto exit_free_qcedev_areq; + } + /* Fall-through */ + case QCEDEV_IOCTL_SHA_UPDATE_REQ: + { + struct scatterlist sg_src; + + if (copy_from_user(&qcedev_areq->sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_AES_CMAC) { + err = qcedev_hash_cmac(qcedev_areq, handle, &sg_src); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + } else { + if (!handle->sha_ctxt.init_done) { + pr_err("%s Init was not called\n", __func__); + mutex_unlock(&hash_access_lock); + err = -EINVAL; + 
goto exit_free_qcedev_areq; + } + err = qcedev_hash_update(qcedev_areq, handle, &sg_src); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + } + + if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { + pr_err("Invalid sha_ctxt.diglen %d\n", + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + memcpy(&qcedev_areq->sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + } + break; + + case QCEDEV_IOCTL_SHA_FINAL_REQ: + + if (!handle->sha_ctxt.init_done) { + pr_err("%s Init was not called\n", __func__); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + if (copy_from_user(&qcedev_areq->sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA; + err = qcedev_hash_final(qcedev_areq, handle); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { + pr_err("Invalid sha_ctxt.diglen %d\n", + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen; + memcpy(&qcedev_areq->sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + handle->sha_ctxt.init_done = false; + break; + + case QCEDEV_IOCTL_GET_SHA_REQ: + { + struct scatterlist sg_src; + + if (copy_from_user(&qcedev_areq->sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq->sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_SHA; + qcedev_hash_init(qcedev_areq, handle, &sg_src); + err = qcedev_hash_update(qcedev_areq, handle, &sg_src); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + err = qcedev_hash_final(qcedev_areq, handle); + if (err) { + mutex_unlock(&hash_access_lock); + goto exit_free_qcedev_areq; + } + if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { + pr_err("Invalid sha_ctxt.diglen %d\n", + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + err = -EINVAL; + goto exit_free_qcedev_areq; + } + qcedev_areq->sha_op_req.diglen = handle->sha_ctxt.diglen; + memcpy(&qcedev_areq->sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); + if (copy_to_user((void __user *)arg, &qcedev_areq->sha_op_req, + sizeof(struct qcedev_sha_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + } + break; + + case QCEDEV_IOCTL_MAP_BUF_REQ: + { + unsigned long long vaddr = 0; + struct qcedev_map_buf_req map_buf = { {0} }; + int i = 0; + + if (copy_from_user(&map_buf, + (void __user 
*)arg, sizeof(map_buf))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + + if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) { + err = -EINVAL; + goto exit_free_qcedev_areq; + } + + for (i = 0; i < map_buf.num_fds; i++) { + err = qcedev_check_and_map_buffer(handle, + map_buf.fd[i], + map_buf.fd_offset[i], + map_buf.fd_size[i], + &vaddr); + if (err) { + pr_err( + "%s: err: failed to map fd(%d) - %d\n", + __func__, map_buf.fd[i], err); + goto exit_free_qcedev_areq; + } + map_buf.buf_vaddr[i] = vaddr; + pr_info("%s: info: vaddr = %llx\n", + __func__, vaddr); + } + + if (copy_to_user((void __user *)arg, &map_buf, + sizeof(map_buf))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + break; + } + + case QCEDEV_IOCTL_UNMAP_BUF_REQ: + { + struct qcedev_unmap_buf_req unmap_buf = { { 0 } }; + int i = 0; + + if (copy_from_user(&unmap_buf, + (void __user *)arg, sizeof(unmap_buf))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + + for (i = 0; i < unmap_buf.num_fds; i++) { + err = qcedev_check_and_unmap_buffer(handle, + unmap_buf.fd[i]); + if (err) { + pr_err( + "%s: err: failed to unmap fd(%d) - %d\n", + __func__, + unmap_buf.fd[i], err); + goto exit_free_qcedev_areq; + } + } + break; + } + + default: + err = -ENOTTY; + goto exit_free_qcedev_areq; + } + +exit_free_qcedev_areq: + if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ && + cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ && podev != NULL) + qcedev_ce_high_bw_req(podev, false); + kfree(qcedev_areq); + return err; +} + +static int qcedev_probe_device(struct platform_device *pdev) +{ + void *handle = NULL; + int rc = 0; + struct qcedev_control *podev; + struct msm_ce_hw_support *platform_support; + + podev = &qce_dev[0]; + + rc = alloc_chrdev_region(&qcedev_device_no, 0, 1, QCEDEV_DEV); + if (rc < 0) { + pr_err("alloc_chrdev_region failed %d\n", rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, QCEDEV_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("class_create failed %d\n", rc); + goto exit_unreg_chrdev_region; + } + + class_dev = device_create(driver_class, NULL, qcedev_device_no, NULL, + QCEDEV_DEV); + if (IS_ERR(class_dev)) { + pr_err("class_device_create failed %d\n", rc); + rc = -ENOMEM; + goto exit_destroy_class; + } + + cdev_init(&podev->cdev, &qcedev_fops); + podev->cdev.owner = THIS_MODULE; + + rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcedev_device_no), 0), 1); + if (rc < 0) { + pr_err("cdev_add failed %d\n", rc); + goto exit_destroy_device; + } + podev->minor = 0; + + podev->high_bw_req_count = 0; + INIT_LIST_HEAD(&podev->ready_commands); + podev->active_command = NULL; + + INIT_LIST_HEAD(&podev->context_banks); + + spin_lock_init(&podev->lock); + + tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev); + + podev->icc_path = of_icc_get(&pdev->dev, "data_path"); + if (IS_ERR(podev->icc_path)) { + rc = PTR_ERR(podev->icc_path); + pr_err("%s Failed to get icc path with error %d\n", + __func__, rc); + goto exit_del_cdev; + } + + rc = icc_set_bw(podev->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (rc) { + pr_err("%s Unable to set high bandwidth\n", __func__); + goto exit_unregister_bus_scale; + } + + handle = qce_open(pdev, &rc); + if (handle == NULL) { + rc = -ENODEV; + goto exit_scale_busbandwidth; + } + rc = icc_set_bw(podev->icc_path, 0, 0); + if (rc) { + pr_err("%s Unable to set to low bandwidth\n", __func__); + goto exit_qce_close; + } + + podev->qce = handle; + podev->pdev = pdev; + platform_set_drvdata(pdev, podev); + + qce_hw_support(podev->qce, &podev->ce_support); + if (podev->ce_support.bam) { + 
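+		/* On BAM-based targets, derive platform_support from the engine's
+		 * reported capabilities; legacy targets supply these fields via
+		 * platform_data in the else branch below.
+		 */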
podev->platform_support.ce_shared = 0; + podev->platform_support.shared_ce_resource = 0; + podev->platform_support.hw_key_support = + podev->ce_support.hw_key; + podev->platform_support.sha_hmac = 1; + } else { + platform_support = + (struct msm_ce_hw_support *)pdev->dev.platform_data; + podev->platform_support.ce_shared = platform_support->ce_shared; + podev->platform_support.shared_ce_resource = + platform_support->shared_ce_resource; + podev->platform_support.hw_key_support = + platform_support->hw_key_support; + podev->platform_support.sha_hmac = platform_support->sha_hmac; + } + + podev->mem_client = qcedev_mem_new_client(MEM_ION); + if (!podev->mem_client) { + pr_err("%s: err: qcedev_mem_new_client failed\n", __func__); + goto exit_qce_close; + } + + rc = of_platform_populate(pdev->dev.of_node, qcedev_match, + NULL, &pdev->dev); + if (rc) { + pr_err("%s: err: of_platform_populate failed: %d\n", + __func__, rc); + goto exit_mem_new_client; + } + + return 0; + +exit_mem_new_client: + if (podev->mem_client) + qcedev_mem_delete_client(podev->mem_client); + podev->mem_client = NULL; + +exit_qce_close: + if (handle) + qce_close(handle); +exit_scale_busbandwidth: + icc_set_bw(podev->icc_path, 0, 0); +exit_unregister_bus_scale: + if (podev->icc_path) + icc_put(podev->icc_path); +exit_del_cdev: + cdev_del(&podev->cdev); +exit_destroy_device: + device_destroy(driver_class, qcedev_device_no); +exit_destroy_class: + class_destroy(driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(qcedev_device_no, 1); + + podev->icc_path = NULL; + platform_set_drvdata(pdev, NULL); + podev->pdev = NULL; + podev->qce = NULL; + + return rc; +} + +static int qcedev_probe(struct platform_device *pdev) +{ + if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcedev")) + return qcedev_probe_device(pdev); + else if (of_device_is_compatible(pdev->dev.of_node, + "qcom,qcedev,context-bank")) + return qcedev_parse_context_bank(pdev); + + return -EINVAL; +}; + +static int qcedev_remove(struct platform_device *pdev) +{ + struct qcedev_control *podev; + + podev = platform_get_drvdata(pdev); + if (!podev) + return 0; + if (podev->qce) + qce_close(podev->qce); + + if (podev->icc_path) + icc_put(podev->icc_path); + tasklet_kill(&podev->done_tasklet); + + cdev_del(&podev->cdev); + + device_destroy(driver_class, qcedev_device_no); + + class_destroy(driver_class); + + unregister_chrdev_region(qcedev_device_no, 1); + return 0; +}; + +static int qcedev_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct qcedev_control *podev; + int ret; + + podev = platform_get_drvdata(pdev); + + if (!podev) + return 0; + + mutex_lock(&qcedev_sent_bw_req); + if (podev->high_bw_req_count) { + ret = qcedev_control_clocks(podev, false); + if (ret) + goto suspend_exit; + } + +suspend_exit: + mutex_unlock(&qcedev_sent_bw_req); + return 0; +} + +static int qcedev_resume(struct platform_device *pdev) +{ + struct qcedev_control *podev; + int ret; + + podev = platform_get_drvdata(pdev); + + if (!podev) + return 0; + + mutex_lock(&qcedev_sent_bw_req); + if (podev->high_bw_req_count) { + ret = qcedev_control_clocks(podev, true); + if (ret) + goto resume_exit; + } + +resume_exit: + mutex_unlock(&qcedev_sent_bw_req); + return 0; +} + +static struct platform_driver qcedev_plat_driver = { + .probe = qcedev_probe, + .remove = qcedev_remove, + .suspend = qcedev_suspend, + .resume = qcedev_resume, + .driver = { + .name = "qce", + .of_match_table = qcedev_match, + }, +}; + +static int _disp_stats(int id) +{ + struct qcedev_stat 
*pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Encryption operation success : %d\n",
+					pstat->qcedev_enc_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Encryption operation fail : %d\n",
+					pstat->qcedev_enc_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Decryption operation success : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Decryption operation fail : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+	return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+	.open = simple_open,
+	.read = _debug_stats_read,
+	.write = _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcedev = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcedev, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
+				PTR_ERR(dent));
+		rc = PTR_ERR(dent);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	_qcedev_debug_init();
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/crypto-qti/qcedev_smmu.c b/crypto-qti/qcedev_smmu.c
new file mode 100644
index 0000000000..04c7ff2842
--- /dev/null
+++ b/crypto-qti/qcedev_smmu.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qti (or) Qualcomm Technologies Inc CE device driver.
+ *
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */ + +#include +#include +#include +#include +#include "linux/qcedev.h" +#include "qcedevi.h" +#include "qcedev_smmu.h" +#include "soc/qcom/secure_buffer.h" +#include + +static int qcedev_setup_context_bank(struct context_bank_info *cb, + struct device *dev) +{ + if (!dev || !cb) { + pr_err("%s err: invalid input params\n", __func__); + return -EINVAL; + } + cb->dev = dev; + + if (!dev->dma_parms) { + dev->dma_parms = devm_kzalloc(dev, + sizeof(*dev->dma_parms), GFP_KERNEL); + if (!dev->dma_parms) + return -ENOMEM; + } + dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); + dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64)); + + return 0; +} + +int qcedev_parse_context_bank(struct platform_device *pdev) +{ + struct qcedev_control *podev; + struct context_bank_info *cb = NULL; + struct device_node *np = NULL; + int rc = 0; + + if (!pdev) { + pr_err("%s err: invalid platform devices\n", __func__); + return -EINVAL; + } + if (!pdev->dev.parent) { + pr_err("%s err: failed to find a parent for %s\n", + __func__, dev_name(&pdev->dev)); + return -EINVAL; + } + + podev = dev_get_drvdata(pdev->dev.parent); + np = pdev->dev.of_node; + cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL); + if (!cb) { + pr_err("%s ERROR = Failed to allocate cb\n", __func__); + return -ENOMEM; + } + + INIT_LIST_HEAD(&cb->list); + list_add_tail(&cb->list, &podev->context_banks); + + rc = of_property_read_string(np, "label", &cb->name); + if (rc) + pr_debug("%s ERROR = Unable to read label\n", __func__); + + cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank"); + + rc = qcedev_setup_context_bank(cb, &pdev->dev); + if (rc) { + pr_err("%s err: cannot setup context bank %d\n", __func__, rc); + goto err_setup_cb; + } + + return 0; + +err_setup_cb: + list_del(&cb->list); + devm_kfree(&pdev->dev, cb); + return rc; +} + +struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype) +{ + struct qcedev_mem_client *mem_client = NULL; + + if (mtype != MEM_ION) { + pr_err("%s: err: Mem type not supported\n", __func__); + goto err; + } + + mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL); + if (!mem_client) + goto err; + mem_client->mtype = mtype; + + return mem_client; +err: + return NULL; +} + +void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client) +{ + kfree(mem_client); +} + +static bool is_iommu_present(struct qcedev_handle *qce_hndl) +{ + return !list_empty(&qce_hndl->cntl->context_banks); +} + +static struct context_bank_info *get_context_bank( + struct qcedev_handle *qce_hndl, bool is_secure) +{ + struct qcedev_control *podev = qce_hndl->cntl; + struct context_bank_info *cb = NULL, *match = NULL; + + list_for_each_entry(cb, &podev->context_banks, list) { + if (cb->is_secure == is_secure) { + match = cb; + break; + } + } + return match; +} + +static int ion_map_buffer(struct qcedev_handle *qce_hndl, + struct qcedev_mem_client *mem_client, int fd, + unsigned int fd_size, struct qcedev_reg_buf_info *binfo) +{ + int rc = 0; + struct dma_buf *buf = NULL; + struct dma_buf_attachment *attach = NULL; + struct sg_table *table = NULL; + struct context_bank_info *cb = NULL; + + buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(buf)) + return -EINVAL; + + if (is_iommu_present(qce_hndl)) { + cb = get_context_bank(qce_hndl, !mem_buf_dma_buf_exclusive_owner(buf)); + if (!cb) { + pr_err("%s: err: failed to get context bank info\n", + __func__); + rc = -EIO; + goto map_err; + } + + /* Prepare a dma buf for dma on the given device */ + attach = dma_buf_attach(buf, cb->dev); + if 
(IS_ERR_OR_NULL(attach)) { + rc = PTR_ERR(attach) ?: -ENOMEM; + pr_err("%s: err: failed to attach dmabuf\n", __func__); + goto map_err; + } + + /* Get the scatterlist for the given attachment */ + attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP; + table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(table)) { + rc = PTR_ERR(table) ?: -ENOMEM; + pr_err("%s: err: failed to map table\n", __func__); + goto map_table_err; + } + + if (table->sgl) { + binfo->ion_buf.iova = sg_dma_address(table->sgl); + binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl); + if (binfo->ion_buf.mapped_buf_size < fd_size) { + pr_err("%s: err: mapping failed, size mismatch\n", + __func__); + rc = -ENOMEM; + goto map_sg_err; + } + } else { + pr_err("%s: err: sg list is NULL\n", __func__); + rc = -ENOMEM; + goto map_sg_err; + } + + binfo->ion_buf.mapping_info.dev = cb->dev; + binfo->ion_buf.mapping_info.mapping = cb->mapping; + binfo->ion_buf.mapping_info.table = table; + binfo->ion_buf.mapping_info.attach = attach; + binfo->ion_buf.mapping_info.buf = buf; + binfo->ion_buf.ion_fd = fd; + } else { + pr_err("%s: err: smmu not enabled\n", __func__); + rc = -EIO; + goto map_err; + } + + return 0; + +map_sg_err: + dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL); +map_table_err: + dma_buf_detach(buf, attach); +map_err: + dma_buf_put(buf); + return rc; +} + +static int ion_unmap_buffer(struct qcedev_handle *qce_hndl, + struct qcedev_reg_buf_info *binfo) +{ + struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info; + + if (is_iommu_present(qce_hndl)) { + dma_buf_unmap_attachment(mapping_info->attach, + mapping_info->table, DMA_BIDIRECTIONAL); + dma_buf_detach(mapping_info->buf, mapping_info->attach); + dma_buf_put(mapping_info->buf); + + } + return 0; +} + +static int qcedev_map_buffer(struct qcedev_handle *qce_hndl, + struct qcedev_mem_client *mem_client, int fd, + unsigned int fd_size, struct qcedev_reg_buf_info *binfo) +{ + int rc = -1; + + switch (mem_client->mtype) { + case MEM_ION: + rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo); + break; + default: + pr_err("%s: err: Mem type not supported\n", __func__); + break; + } + + if (rc) + pr_err("%s: err: failed to map buffer\n", __func__); + + return rc; +} + +static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl, + struct qcedev_mem_client *mem_client, + struct qcedev_reg_buf_info *binfo) +{ + int rc = -1; + + switch (mem_client->mtype) { + case MEM_ION: + rc = ion_unmap_buffer(qce_hndl, binfo); + break; + default: + pr_err("%s: err: Mem type not supported\n", __func__); + break; + } + + if (rc) + pr_err("%s: err: failed to unmap buffer\n", __func__); + + return rc; +} + +int qcedev_check_and_map_buffer(void *handle, + int fd, unsigned int offset, unsigned int fd_size, + unsigned long long *vaddr) +{ + bool found = false; + struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL; + struct qcedev_mem_client *mem_client = NULL; + struct qcedev_handle *qce_hndl = handle; + int rc = 0; + unsigned long mapped_size = 0; + + if (!handle || !vaddr || fd < 0 || offset >= fd_size) { + pr_err("%s: err: invalid input arguments\n", __func__); + return -EINVAL; + } + + if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) { + pr_err("%s: err: invalid qcedev handle\n", __func__); + return -EINVAL; + } + mem_client = qce_hndl->cntl->mem_client; + + if (mem_client->mtype != MEM_ION) + return -EPERM; + + /* Check if the buffer fd is already mapped */ + mutex_lock(&qce_hndl->registeredbufs.lock); + 
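+	/* Reuse an existing mapping for this fd, if present, and take an
+	 * extra reference instead of mapping the buffer again.
+	 */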
list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) { + if (temp->ion_buf.ion_fd == fd) { + found = true; + *vaddr = temp->ion_buf.iova; + mapped_size = temp->ion_buf.mapped_buf_size; + atomic_inc(&temp->ref_count); + break; + } + } + mutex_unlock(&qce_hndl->registeredbufs.lock); + + /* If buffer fd is not mapped then create a fresh mapping */ + if (!found) { + pr_debug("%s: info: ion fd not registered with driver\n", + __func__); + binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + pr_err("%s: err: failed to allocate binfo\n", + __func__); + rc = -ENOMEM; + goto error; + } + rc = qcedev_map_buffer(qce_hndl, mem_client, fd, + fd_size, binfo); + if (rc) { + pr_err("%s: err: failed to map fd (%d) error = %d\n", + __func__, fd, rc); + goto error; + } + + *vaddr = binfo->ion_buf.iova; + mapped_size = binfo->ion_buf.mapped_buf_size; + atomic_inc(&binfo->ref_count); + + /* Add buffer mapping information to regd buffer list */ + mutex_lock(&qce_hndl->registeredbufs.lock); + list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list); + mutex_unlock(&qce_hndl->registeredbufs.lock); + } + + /* Make sure the offset is within the mapped range */ + if (offset >= mapped_size) { + pr_err( + "%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n", + __func__, offset, mapped_size, fd); + rc = -ERANGE; + goto unmap; + } + + /* return the mapped virtual address adjusted by offset */ + *vaddr += offset; + + return 0; + +unmap: + if (!found) + qcedev_unmap_buffer(handle, mem_client, binfo); + +error: + kfree(binfo); + return rc; +} + +int qcedev_check_and_unmap_buffer(void *handle, int fd) +{ + struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL; + struct qcedev_mem_client *mem_client = NULL; + struct qcedev_handle *qce_hndl = handle; + bool found = false; + + if (!handle || fd < 0) { + pr_err("%s: err: invalid input arguments\n", __func__); + return -EINVAL; + } + + if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) { + pr_err("%s: err: invalid qcedev handle\n", __func__); + return -EINVAL; + } + mem_client = qce_hndl->cntl->mem_client; + + if (mem_client->mtype != MEM_ION) + return -EPERM; + + /* Check if the buffer fd is mapped and present in the regd list. 
*/ + mutex_lock(&qce_hndl->registeredbufs.lock); + list_for_each_entry_safe(binfo, dummy, + &qce_hndl->registeredbufs.list, list) { + if (binfo->ion_buf.ion_fd == fd) { + found = true; + atomic_dec(&binfo->ref_count); + + /* Unmap only if there are no more references */ + if (atomic_read(&binfo->ref_count) == 0) { + qcedev_unmap_buffer(qce_hndl, + mem_client, binfo); + list_del(&binfo->list); + kfree(binfo); + } + break; + } + } + mutex_unlock(&qce_hndl->registeredbufs.lock); + + if (!found) { + pr_err("%s: err: calling unmap on unknown fd %d\n", + __func__, fd); + return -EINVAL; + } + + return 0; +} + +int qcedev_unmap_all_buffers(void *handle) +{ + struct qcedev_reg_buf_info *binfo = NULL; + struct qcedev_mem_client *mem_client = NULL; + struct qcedev_handle *qce_hndl = handle; + struct list_head *pos; + + if (!handle) { + pr_err("%s: err: invalid input arguments\n", __func__); + return -EINVAL; + } + + if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) { + pr_err("%s: err: invalid qcedev handle\n", __func__); + return -EINVAL; + } + mem_client = qce_hndl->cntl->mem_client; + + if (mem_client->mtype != MEM_ION) + return -EPERM; + + mutex_lock(&qce_hndl->registeredbufs.lock); + while (!list_empty(&qce_hndl->registeredbufs.list)) { + pos = qce_hndl->registeredbufs.list.next; + binfo = list_entry(pos, struct qcedev_reg_buf_info, list); + if (binfo) + qcedev_unmap_buffer(qce_hndl, mem_client, binfo); + list_del(pos); + kfree(binfo); + } + mutex_unlock(&qce_hndl->registeredbufs.lock); + + return 0; +} + diff --git a/crypto-qti/qcedev_smmu.h b/crypto-qti/qcedev_smmu.h new file mode 100644 index 0000000000..1fe35d23a7 --- /dev/null +++ b/crypto-qti/qcedev_smmu.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Qti (or) Qualcomm Technologies Inc CE device driver. + * + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DRIVERS_CRYPTO_PARSE_H_ +#define _DRIVERS_CRYPTO_PARSE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct context_bank_info { + struct list_head list; + const char *name; + u32 buffer_type; + u32 start_addr; + u32 size; + bool is_secure; + struct device *dev; + struct dma_iommu_mapping *mapping; +}; + +enum qcedev_mem_type { + MEM_ION, +}; + +struct qcedev_mem_client { + enum qcedev_mem_type mtype; +}; + +struct dma_mapping_info { + struct device *dev; + struct dma_iommu_mapping *mapping; + struct sg_table *table; + struct dma_buf_attachment *attach; + struct dma_buf *buf; +}; + +struct qcedev_ion_buf_info { + struct dma_mapping_info mapping_info; + dma_addr_t iova; + unsigned long mapped_buf_size; + int ion_fd; +}; + +struct qcedev_reg_buf_info { + struct list_head list; + union { + struct qcedev_ion_buf_info ion_buf; + }; + atomic_t ref_count; +}; + +struct qcedev_buffer_list { + struct list_head list; + struct mutex lock; +}; + +int qcedev_parse_context_bank(struct platform_device *pdev); +struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype); +void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client); +int qcedev_check_and_map_buffer(void *qce_hndl, + int fd, unsigned int offset, unsigned int fd_size, + unsigned long long *vaddr); +int qcedev_check_and_unmap_buffer(void *handle, int fd); +int qcedev_unmap_all_buffers(void *handle); + +extern struct qcedev_reg_buf_info *global_binfo_in; +extern struct qcedev_reg_buf_info *global_binfo_out; +extern struct qcedev_reg_buf_info *global_binfo_res; +#endif + diff --git a/crypto-qti/qcedevi.h b/crypto-qti/qcedevi.h new file mode 100644 index 0000000000..41810784d9 --- /dev/null +++ b/crypto-qti/qcedevi.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * QTI crypto Driver + * + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef __CRYPTO_MSM_QCEDEVI_H +#define __CRYPTO_MSM_QCEDEVI_H + +#include +#include +#include +#include "linux/platform_data/qcom_crypto_device.h" +#include "linux/fips_status.h" +#include "qce.h" +#include "qcedev_smmu.h" + +#define CACHE_LINE_SIZE 32 +#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE + +enum qcedev_crypto_oper_type { + QCEDEV_CRYPTO_OPER_CIPHER = 0, + QCEDEV_CRYPTO_OPER_SHA = 1, + QCEDEV_CRYPTO_OPER_LAST +}; + +struct qcedev_handle; + +struct qcedev_cipher_req { + struct skcipher_request creq; + void *cookie; +}; + +struct qcedev_sha_req { + struct ahash_request sreq; + void *cookie; +}; + +struct qcedev_sha_ctxt { + uint32_t auth_data[4]; + uint8_t digest[QCEDEV_MAX_SHA_DIGEST]; + uint32_t diglen; + uint8_t trailing_buf[64]; + uint32_t trailing_buf_len; + uint8_t first_blk; + uint8_t last_blk; + uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE]; + bool init_done; +}; + +struct qcedev_async_req { + struct list_head list; + struct completion complete; + enum qcedev_crypto_oper_type op_type; + union { + struct qcedev_cipher_op_req cipher_op_req; + struct qcedev_sha_op_req sha_op_req; + }; + + union { + struct qcedev_cipher_req cipher_req; + struct qcedev_sha_req sha_req; + }; + struct qcedev_handle *handle; + int err; +}; + +/********************************************************************** + * Register ourselves as a char device to be able to access the dev driver + * from userspace. 
+ */ + +#define QCEDEV_DEV "qce" + +struct qcedev_control { + + /* CE features supported by platform */ + struct msm_ce_hw_support platform_support; + + uint32_t ce_lock_count; + uint32_t high_bw_req_count; + + /* CE features/algorithms supported by HW engine*/ + struct ce_hw_support ce_support; + + /* replaced msm_bus with interconnect path */ + struct icc_path *icc_path; + + /* char device */ + struct cdev cdev; + + int minor; + + /* qce handle */ + void *qce; + + /* platform device */ + struct platform_device *pdev; + + unsigned int magic; + + struct list_head ready_commands; + struct qcedev_async_req *active_command; + spinlock_t lock; + struct tasklet_struct done_tasklet; + struct list_head context_banks; + struct qcedev_mem_client *mem_client; +}; + +struct qcedev_handle { + /* qcedev control handle */ + struct qcedev_control *cntl; + /* qce internal sha context*/ + struct qcedev_sha_ctxt sha_ctxt; + /* qcedev mapped buffer list */ + struct qcedev_buffer_list registeredbufs; +}; + +void qcedev_cipher_req_cb(void *cookie, unsigned char *icv, + unsigned char *iv, int ret); + +void qcedev_sha_req_cb(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret); + +#endif /* __CRYPTO_MSM_QCEDEVI_H */ diff --git a/crypto-qti/qcrypto.c b/crypto-qti/qcrypto.c new file mode 100644 index 0000000000..32864a85b5 --- /dev/null +++ b/crypto-qti/qcrypto.c @@ -0,0 +1,5495 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI Crypto driver + * + * Copyright (c) 2010-2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/platform_data/qcom_crypto_device.h" +#include +#include +#include "linux/qcrypto.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "linux/fips_status.h" + +#include "qce.h" + +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 4096 +#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */ + +/* + * For crypto 5.0 which has burst size alignment requirement. 
+ */ +#define MAX_ALIGN_SIZE 0x40 + +#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000 + +/* Status of response workq */ +enum resp_workq_sts { + NOT_SCHEDULED = 0, + IS_SCHEDULED = 1, + SCHEDULE_AGAIN = 2 +}; + +/* Status of req processing by CEs */ +enum req_processing_sts { + STOPPED = 0, + IN_PROGRESS = 1 +}; + +enum qcrypto_bus_state { + BUS_NO_BANDWIDTH = 0, + BUS_HAS_BANDWIDTH, + BUS_BANDWIDTH_RELEASING, + BUS_BANDWIDTH_ALLOCATING, + BUS_SUSPENDED, + BUS_SUSPENDING, +}; + +struct crypto_stat { + u64 aead_sha1_aes_enc; + u64 aead_sha1_aes_dec; + u64 aead_sha1_des_enc; + u64 aead_sha1_des_dec; + u64 aead_sha1_3des_enc; + u64 aead_sha1_3des_dec; + u64 aead_sha256_aes_enc; + u64 aead_sha256_aes_dec; + u64 aead_sha256_des_enc; + u64 aead_sha256_des_dec; + u64 aead_sha256_3des_enc; + u64 aead_sha256_3des_dec; + u64 aead_ccm_aes_enc; + u64 aead_ccm_aes_dec; + u64 aead_rfc4309_ccm_aes_enc; + u64 aead_rfc4309_ccm_aes_dec; + u64 aead_op_success; + u64 aead_op_fail; + u64 aead_bad_msg; + u64 sk_cipher_aes_enc; + u64 sk_cipher_aes_dec; + u64 sk_cipher_des_enc; + u64 sk_cipher_des_dec; + u64 sk_cipher_3des_enc; + u64 sk_cipher_3des_dec; + u64 sk_cipher_op_success; + u64 sk_cipher_op_fail; + u64 sha1_digest; + u64 sha256_digest; + u64 sha1_hmac_digest; + u64 sha256_hmac_digest; + u64 ahash_op_success; + u64 ahash_op_fail; +}; +static struct crypto_stat _qcrypto_stat; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +static bool _qcrypto_init_assign; +struct crypto_priv; +struct qcrypto_req_control { + unsigned int index; + bool in_use; + struct crypto_engine *pce; + struct crypto_async_request *req; + struct qcrypto_resp_ctx *arsp; + int res; /* execution result */ +}; + +struct crypto_engine { + struct list_head elist; + void *qce; /* qce handle */ + struct platform_device *pdev; /* platform device */ + struct crypto_priv *pcp; + struct icc_path *icc_path; + struct crypto_queue req_queue; /* + * request queue for those requests + * that have this engine assigned + * waiting to be executed + */ + u64 total_req; + u64 err_req; + u32 unit; + u32 ce_device; + u32 ce_hw_instance; + unsigned int signature; + + enum qcrypto_bus_state bw_state; + bool high_bw_req; + struct timer_list bw_reaper_timer; + struct work_struct bw_reaper_ws; + struct work_struct bw_allocate_ws; + + /* engine execution sequence number */ + u32 active_seq; + /* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */ + u32 last_active_seq; + + bool check_flag; + /*Added to support multi-requests*/ + unsigned int max_req; + struct qcrypto_req_control *preq_pool; + atomic_t req_count; + bool issue_req; /* an request is being issued to qce */ + bool first_engine; /* this engine is the first engine or not */ + unsigned int irq_cpu; /* the cpu running the irq of this engine */ + unsigned int max_req_used; /* debug stats */ +}; + +#define MAX_SMP_CPU 8 + +struct crypto_priv { + /* CE features supported by target device*/ + struct msm_ce_hw_support platform_support; + + /* CE features/algorithms supported by HW engine*/ + struct ce_hw_support ce_support; + + /* the lock protects crypto queue and req */ + spinlock_t lock; + + /* list of registered algorithms */ + struct list_head alg_list; + + /* current active request */ + struct crypto_async_request *req; + + struct work_struct unlock_ce_ws; + struct list_head engine_list; /* list of qcrypto engines */ + int32_t total_units; /* total units of engines */ + struct mutex engine_lock; + + struct crypto_engine *next_engine; /* next assign engine */ + struct 
crypto_queue req_queue; /* + * request queue for those requests + * that waiting for an available + * engine. + */ + struct llist_head ordered_resp_list; /* Queue to maintain + * responses in sequence. + */ + atomic_t resp_cnt; + struct workqueue_struct *resp_wq; + struct work_struct resp_work; /* + * Workq to send responses + * in sequence. + */ + enum resp_workq_sts sched_resp_workq_status; + enum req_processing_sts ce_req_proc_sts; + int cpu_getting_irqs_frm_first_ce; + struct crypto_engine *first_engine; + struct crypto_engine *scheduled_eng; /* last engine scheduled */ + + /* debug stats */ + unsigned int no_avail; + unsigned int resp_stop; + unsigned int resp_start; + unsigned int max_qlen; + unsigned int queue_work_eng3; + unsigned int queue_work_not_eng3; + unsigned int queue_work_not_eng3_nz; + unsigned int max_resp_qlen; + unsigned int max_reorder_cnt; + unsigned int cpu_req[MAX_SMP_CPU+1]; +}; +static struct crypto_priv qcrypto_dev; +static struct crypto_engine *_qcrypto_static_assign_engine( + struct crypto_priv *cp); +static struct crypto_engine *_avail_eng(struct crypto_priv *cp); +static struct qcrypto_req_control *qcrypto_alloc_req_control( + struct crypto_engine *pce) +{ + int i; + struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool; + unsigned int req_count; + + for (i = 0; i < pce->max_req; i++) { + if (!xchg(&pqcrypto_req_control->in_use, true)) { + req_count = atomic_inc_return(&pce->req_count); + if (req_count > pce->max_req_used) + pce->max_req_used = req_count; + return pqcrypto_req_control; + } + pqcrypto_req_control++; + } + return NULL; +} + +static void qcrypto_free_req_control(struct crypto_engine *pce, + struct qcrypto_req_control *preq) +{ + /* do this before free req */ + preq->req = NULL; + preq->arsp = NULL; + /* free req */ + if (!xchg(&preq->in_use, false)) + pr_warn("request info %pK free already\n", preq); + else + atomic_dec(&pce->req_count); +} + +static struct qcrypto_req_control *find_req_control_for_areq( + struct crypto_engine *pce, + struct crypto_async_request *areq) +{ + int i; + struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool; + + for (i = 0; i < pce->max_req; i++) { + if (pqcrypto_req_control->req == areq) + return pqcrypto_req_control; + pqcrypto_req_control++; + } + return NULL; +} + +static void qcrypto_init_req_control(struct crypto_engine *pce, + struct qcrypto_req_control *pqcrypto_req_control) +{ + int i; + + pce->preq_pool = pqcrypto_req_control; + atomic_set(&pce->req_count, 0); + for (i = 0; i < pce->max_req; i++) { + pqcrypto_req_control->index = i; + pqcrypto_req_control->in_use = false; + pqcrypto_req_control->pce = pce; + pqcrypto_req_control++; + } +} + +static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp, + unsigned int device) +{ + struct crypto_engine *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + list_for_each_entry(entry, &cp->engine_list, elist) { + if (entry->ce_device == device) + break; + } + spin_unlock_irqrestore(&cp->lock, flags); + + if (((entry != NULL) && (entry->ce_device != device)) || + (entry == NULL)) { + pr_err("Device node for CE device %d NOT FOUND!!\n", + device); + return NULL; + } + + return entry; +} + +static struct crypto_engine *_qrypto_find_pengine_device_hw + (struct crypto_priv *cp, + u32 device, + u32 hw_instance) +{ + struct crypto_engine *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + list_for_each_entry(entry, &cp->engine_list, elist) { + if ((entry->ce_device == 
device) && + (entry->ce_hw_instance == hw_instance)) + break; + } + spin_unlock_irqrestore(&cp->lock, flags); + + if (((entry != NULL) && + ((entry->ce_device != device) + || (entry->ce_hw_instance != hw_instance))) + || (entry == NULL)) { + pr_err("Device node for CE device %d NOT FOUND!!\n", + device); + return NULL; + } + return entry; +} + +int qcrypto_get_num_engines(void) +{ + struct crypto_priv *cp = &qcrypto_dev; + struct crypto_engine *entry = NULL; + int count = 0; + + list_for_each_entry(entry, &cp->engine_list, elist) { + count++; + } + return count; +} +EXPORT_SYMBOL(qcrypto_get_num_engines); + +void qcrypto_get_engine_list(size_t num_engines, + struct crypto_engine_entry *arr) +{ + struct crypto_priv *cp = &qcrypto_dev; + struct crypto_engine *entry = NULL; + size_t arr_index = 0; + + list_for_each_entry(entry, &cp->engine_list, elist) { + arr[arr_index].ce_device = entry->ce_device; + arr[arr_index].hw_instance = entry->ce_hw_instance; + arr_index++; + if (arr_index >= num_engines) + break; + } +} +EXPORT_SYMBOL(qcrypto_get_engine_list); + +enum qcrypto_alg_type { + QCRYPTO_ALG_CIPHER = 0, + QCRYPTO_ALG_SHA = 1, + QCRYPTO_ALG_AEAD = 2, + QCRYPTO_ALG_LAST +}; + +struct qcrypto_alg { + struct list_head entry; + struct skcipher_alg cipher_alg; + struct ahash_alg sha_alg; + struct aead_alg aead_alg; + enum qcrypto_alg_type alg_type; + struct crypto_priv *cp; +}; + +#define QCRYPTO_MAX_KEY_SIZE 64 +/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +#define QCRYPTO_MAX_IV_LENGTH 16 + +#define QCRYPTO_CCM4309_NONCE_LEN 3 + +struct qcrypto_cipher_ctx { + struct list_head rsp_queue; /* response queue */ + struct crypto_engine *pengine; /* fixed engine assigned to this tfm */ + struct crypto_priv *cp; + unsigned int flags; + + enum qce_hash_alg_enum auth_alg; /* for aead */ + u8 auth_key[QCRYPTO_MAX_KEY_SIZE]; + u8 iv[QCRYPTO_MAX_IV_LENGTH]; + + u8 enc_key[QCRYPTO_MAX_KEY_SIZE]; + unsigned int enc_key_len; + + unsigned int authsize; + unsigned int auth_key_len; + + u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN]; + + struct crypto_sync_skcipher *cipher_aes192_fb; + + struct crypto_ahash *ahash_aead_aes192_fb; +}; + +struct qcrypto_resp_ctx { + struct list_head list; + struct llist_node llist; + struct crypto_async_request *async_req; /* async req */ + int res; /* execution result */ +}; + +struct qcrypto_cipher_req_ctx { + struct qcrypto_resp_ctx rsp_entry;/* rsp entry. 
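+ * Linked into the tfm rsp_queue; completions are posted in request order.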
*/ + struct crypto_engine *pengine; /* engine assigned to this request */ + u8 *iv; + u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH]; + unsigned int ivsize; + int aead; + int ccmtype; /* default: 0, rfc4309: 1 */ + struct scatterlist asg; /* Formatted associated data sg */ + unsigned char *adata; /* Pointer to formatted assoc data */ + enum qce_cipher_alg_enum alg; + enum qce_cipher_dir_enum dir; + enum qce_cipher_mode_enum mode; + + struct scatterlist *orig_src; /* Original src sg ptr */ + struct scatterlist *orig_dst; /* Original dst sg ptr */ + struct scatterlist dsg; /* Dest Data sg */ + struct scatterlist ssg; /* Source Data sg */ + unsigned char *data; /* Incoming data pointer*/ + + struct aead_request *aead_req; + struct ahash_request *fb_hash_req; + uint8_t fb_ahash_digest[SHA256_DIGEST_SIZE]; + struct scatterlist fb_ablkcipher_src_sg[2]; + struct scatterlist fb_ablkcipher_dst_sg[2]; + char *fb_aes_iv; + unsigned int fb_ahash_length; + struct skcipher_request *fb_aes_req; + struct scatterlist *fb_aes_src; + struct scatterlist *fb_aes_dst; + unsigned int fb_aes_cryptlen; +}; + +#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE +#define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32)) +#define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE + +#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 768 +#define COMPLETION_CB_BACKLOG_LENGTH_STOP 400 +#define COMPLETION_CB_BACKLOG_LENGTH_START \ + (COMPLETION_CB_BACKLOG_LENGTH_STOP / 2) + +static uint8_t _std_init_vector_sha1_uint8[] = { + 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, + 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, + 0xC3, 0xD2, 0xE1, 0xF0 +}; + +/* standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha256_uint8[] = { + 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, + 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, + 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, + 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 +}; + +struct qcrypto_sha_ctx { + struct list_head rsp_queue; /* response queue */ + struct crypto_engine *pengine; /* fixed engine assigned to this tfm */ + struct crypto_priv *cp; + unsigned int flags; + enum qce_hash_alg_enum alg; + uint32_t diglen; + uint32_t authkey_in_len; + uint8_t authkey[SHA_MAX_BLOCK_SIZE]; + struct ahash_request *ahash_req; + struct completion ahash_req_complete; +}; + +struct qcrypto_sha_req_ctx { + struct qcrypto_resp_ctx rsp_entry;/* rsp entry. 
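+ * Queued on the sha tfm rsp_queue, completed in order like the cipher case.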
*/ + struct crypto_engine *pengine; /* engine assigned to this request */ + + struct scatterlist *src; + uint32_t nbytes; + + struct scatterlist *orig_src; /* Original src sg ptr */ + struct scatterlist dsg; /* Data sg */ + unsigned char *data; /* Incoming data pointer*/ + unsigned char *data2; /* Updated data pointer*/ + + uint32_t byte_count[4]; + u64 count; + uint8_t first_blk; + uint8_t last_blk; + uint8_t trailing_buf[SHA_MAX_BLOCK_SIZE]; + uint32_t trailing_buf_len; + + /* dma buffer, Internal use */ + uint8_t staging_dmabuf + [SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE]; + + uint8_t digest[SHA_MAX_DIGEST_SIZE]; + struct scatterlist sg[2]; +}; + +static void _byte_stream_to_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned int n; + + n = len / sizeof(uint32_t); + for (; n > 0; n--) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) | + (*(b+3) & 0xff); + b += sizeof(uint32_t); + iv++; + } + + n = len % sizeof(uint32_t); + if (n == 3) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00); + } else if (n == 2) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000); + } else if (n == 1) { + *iv = ((*b << 24) & 0xff000000); + } +} + +static void _words_to_byte_stream(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned int n = len / sizeof(uint32_t); + + for (; n > 0; n--) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b++ = (unsigned char) ((*iv >> 8) & 0xff); + *b++ = (unsigned char) (*iv & 0xff); + iv++; + } + n = len % sizeof(uint32_t); + if (n == 3) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b = (unsigned char) ((*iv >> 8) & 0xff); + } else if (n == 2) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b = (unsigned char) ((*iv >> 16) & 0xff); + } else if (n == 1) { + *b = (unsigned char) ((*iv >> 24) & 0xff); + } +} + +static void qcrypto_ce_set_bus(struct crypto_engine *pengine, + bool high_bw_req) +{ + struct crypto_priv *cp = pengine->pcp; + unsigned int control_flag; + int ret = 0; + + if (cp->ce_support.req_bw_before_clk) { + if (high_bw_req) + control_flag = QCE_BW_REQUEST_FIRST; + else + control_flag = QCE_CLK_DISABLE_FIRST; + } else { + if (high_bw_req) + control_flag = QCE_CLK_ENABLE_FIRST; + else + control_flag = QCE_BW_REQUEST_RESET_FIRST; + } + + switch (control_flag) { + case QCE_CLK_ENABLE_FIRST: + ret = qce_enable_clk(pengine->qce); + if (ret) { + pr_err("%s Unable enable clk\n", __func__); + return; + } + ret = icc_set_bw(pengine->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) { + pr_err("%s Unable to set high bw\n", __func__); + ret = qce_disable_clk(pengine->qce); + if (ret) + pr_err("%s Unable disable clk\n", __func__); + return; + } + break; + case QCE_BW_REQUEST_FIRST: + ret = icc_set_bw(pengine->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) { + pr_err("%s Unable to set high bw\n", __func__); + return; + } + ret = qce_enable_clk(pengine->qce); + if (ret) { + pr_err("%s Unable enable clk\n", __func__); + ret = icc_set_bw(pengine->icc_path, 0, 0); + if (ret) + pr_err("%s Unable to set low bw\n", __func__); + return; + } + break; + case QCE_CLK_DISABLE_FIRST: + ret = qce_disable_clk(pengine->qce); + if (ret) { + pr_err("%s Unable to disable clk\n", __func__); + return; + } + ret = icc_set_bw(pengine->icc_path, 0, 0); + if (ret) { + pr_err("%s Unable to set low bw\n", __func__); + ret = 
qce_enable_clk(pengine->qce); + if (ret) + pr_err("%s Unable enable clk\n", __func__); + return; + } + break; + case QCE_BW_REQUEST_RESET_FIRST: + ret = icc_set_bw(pengine->icc_path, 0, 0); + if (ret) { + pr_err("%s Unable to set low bw\n", __func__); + return; + } + ret = qce_disable_clk(pengine->qce); + if (ret) { + pr_err("%s Unable to disable clk\n", __func__); + ret = icc_set_bw(pengine->icc_path, + CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (ret) + pr_err("%s Unable to set high bw\n", __func__); + return; + } + break; + default: + return; + } +} + +static void qcrypto_bw_reaper_timer_callback(struct timer_list *data) +{ + struct crypto_engine *pengine = from_timer(pengine, data, + bw_reaper_timer); + + schedule_work(&pengine->bw_reaper_ws); +} + +static void qcrypto_bw_set_timeout(struct crypto_engine *pengine) +{ + pengine->bw_reaper_timer.expires = jiffies + + msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT); + mod_timer(&(pengine->bw_reaper_timer), + pengine->bw_reaper_timer.expires); +} + +static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine) +{ + schedule_work(&pengine->bw_allocate_ws); +} + +static int _start_qcrypto_process(struct crypto_priv *cp, + struct crypto_engine *pengine); + +static void qcrypto_bw_allocate_work(struct work_struct *work) +{ + struct crypto_engine *pengine = container_of(work, + struct crypto_engine, bw_allocate_ws); + unsigned long flags; + struct crypto_priv *cp = pengine->pcp; + + spin_lock_irqsave(&cp->lock, flags); + pengine->bw_state = BUS_BANDWIDTH_ALLOCATING; + spin_unlock_irqrestore(&cp->lock, flags); + + qcrypto_ce_set_bus(pengine, true); + qcrypto_bw_set_timeout(pengine); + spin_lock_irqsave(&cp->lock, flags); + pengine->bw_state = BUS_HAS_BANDWIDTH; + pengine->high_bw_req = false; + pengine->active_seq++; + pengine->check_flag = true; + spin_unlock_irqrestore(&cp->lock, flags); + _start_qcrypto_process(cp, pengine); +}; + +static void qcrypto_bw_reaper_work(struct work_struct *work) +{ + struct crypto_engine *pengine = container_of(work, + struct crypto_engine, bw_reaper_ws); + struct crypto_priv *cp = pengine->pcp; + unsigned long flags; + u32 active_seq; + bool restart = false; + + spin_lock_irqsave(&cp->lock, flags); + active_seq = pengine->active_seq; + if (pengine->bw_state == BUS_HAS_BANDWIDTH && + (active_seq == pengine->last_active_seq)) { + + /* check if engine is stuck */ + if (atomic_read(&pengine->req_count) > 0) { + if (pengine->check_flag) + dev_warn(&pengine->pdev->dev, + "The engine appears to be stuck seq %d.\n", + active_seq); + pengine->check_flag = false; + goto ret; + } + pengine->bw_state = BUS_BANDWIDTH_RELEASING; + spin_unlock_irqrestore(&cp->lock, flags); + + qcrypto_ce_set_bus(pengine, false); + + spin_lock_irqsave(&cp->lock, flags); + + if (pengine->high_bw_req) { + /* we got request while we are disabling clock */ + pengine->bw_state = BUS_BANDWIDTH_ALLOCATING; + spin_unlock_irqrestore(&cp->lock, flags); + + qcrypto_ce_set_bus(pengine, true); + + spin_lock_irqsave(&cp->lock, flags); + pengine->bw_state = BUS_HAS_BANDWIDTH; + pengine->high_bw_req = false; + restart = true; + } else + pengine->bw_state = BUS_NO_BANDWIDTH; + } +ret: + pengine->last_active_seq = active_seq; + spin_unlock_irqrestore(&cp->lock, flags); + if (restart) + _start_qcrypto_process(cp, pengine); + if (pengine->bw_state != BUS_NO_BANDWIDTH) + qcrypto_bw_set_timeout(pengine); +} + +static int qcrypto_count_sg(struct scatterlist *sg, int nbytes) +{ + int i; + + for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg)) + nbytes -= 
sg->length; + + return i; +} + +static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl, + unsigned int nents, void *buf, size_t buflen) +{ + int i; + size_t offset, len; + + for (i = 0, offset = 0; i < nents; ++i) { + len = sg_copy_from_buffer(sgl, 1, buf, buflen); + buf += len; + buflen -= len; + offset += len; + sgl = sg_next(sgl); + } + + return offset; +} + +static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl, + unsigned int nents, void *buf, size_t buflen) +{ + int i; + size_t offset, len; + + for (i = 0, offset = 0; i < nents; ++i) { + len = sg_copy_to_buffer(sgl, 1, buf, buflen); + buf += len; + buflen -= len; + offset += len; + sgl = sg_next(sgl); + } + + return offset; +} +static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp, + struct ahash_alg *template) +{ + struct qcrypto_alg *q_alg; + + q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); + if (!q_alg) + return ERR_PTR(-ENOMEM); + + q_alg->alg_type = QCRYPTO_ALG_SHA; + q_alg->sha_alg = *template; + q_alg->cp = cp; + + return q_alg; +} + +static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp, + struct skcipher_alg *template) +{ + struct qcrypto_alg *q_alg; + + q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); + if (!q_alg) + return ERR_PTR(-ENOMEM); + + q_alg->alg_type = QCRYPTO_ALG_CIPHER; + q_alg->cipher_alg = *template; + q_alg->cp = cp; + + return q_alg; +} + +static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp, + struct aead_alg *template) +{ + struct qcrypto_alg *q_alg; + + q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); + if (!q_alg) + return ERR_PTR(-ENOMEM); + + q_alg->alg_type = QCRYPTO_ALG_AEAD; + q_alg->aead_alg = *template; + q_alg->cp = cp; + + return q_alg; +} + +static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx, + struct qcrypto_alg *q_alg) +{ + if (!ctx || !q_alg) { + pr_err("ctx or q_alg is NULL\n"); + return -EINVAL; + } + ctx->flags = 0; + /* update context with ptr to cp */ + ctx->cp = q_alg->cp; + /* random first IV */ + get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH); + if (_qcrypto_init_assign) { + ctx->pengine = _qcrypto_static_assign_engine(ctx->cp); + if (ctx->pengine == NULL) + return -ENODEV; + } else + ctx->pengine = NULL; + INIT_LIST_HEAD(&ctx->rsp_queue); + ctx->auth_alg = QCE_HASH_LAST; + return 0; +} + +static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), + struct ahash_alg, halg); + struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg, + sha_alg); + + crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx)); + /* update context with ptr to cp */ + sha_ctx->cp = q_alg->cp; + sha_ctx->flags = 0; + sha_ctx->ahash_req = NULL; + if (_qcrypto_init_assign) { + sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp); + if (sha_ctx->pengine == NULL) + return -ENODEV; + } else + sha_ctx->pengine = NULL; + INIT_LIST_HEAD(&sha_ctx->rsp_queue); + return 0; +} + +static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + + if (!list_empty(&sha_ctx->rsp_queue)) + pr_err("%s: requests still outstanding\n", __func__); + if (sha_ctx->ahash_req != NULL) { + ahash_request_free(sha_ctx->ahash_req); + sha_ctx->ahash_req = NULL; + } +} + + +static void _crypto_sha_hmac_ahash_req_complete( + struct 
crypto_async_request *req, int err); + +static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + int ret = 0; + + ret = _qcrypto_ahash_cra_init(tfm); + if (ret) + return ret; + sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL); + + if (sha_ctx->ahash_req == NULL) { + _qcrypto_ahash_cra_exit(tfm); + return -ENOMEM; + } + + init_completion(&sha_ctx->ahash_req_complete); + ahash_request_set_callback(sha_ctx->ahash_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _crypto_sha_hmac_ahash_req_complete, + &sha_ctx->ahash_req_complete); + crypto_ahash_clear_flags(ahash, ~0); + + return 0; +} + +static int _qcrypto_skcipher_init(struct crypto_skcipher *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct qcrypto_alg *q_alg; + + q_alg = container_of(alg, struct qcrypto_alg, cipher_alg); + crypto_skcipher_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + + return _qcrypto_cipher_ctx_init(ctx, q_alg); +} + +static int _qcrypto_aes_skcipher_init(struct crypto_skcipher *tfm) +{ + const char *name = crypto_tfm_alg_name(&tfm->base); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + struct crypto_priv *cp = &qcrypto_dev; + + if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) { + ctx->cipher_aes192_fb = NULL; + return _qcrypto_skcipher_init(tfm); + } + ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher(name, 0, + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->cipher_aes192_fb)) { + pr_err("Error allocating fallback algo %s\n", name); + ret = PTR_ERR(ctx->cipher_aes192_fb); + ctx->cipher_aes192_fb = NULL; + return ret; + } + return _qcrypto_skcipher_init(tfm); +} + +static int _qcrypto_aead_cra_init(struct crypto_aead *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + struct aead_alg *aeadalg = crypto_aead_alg(tfm); + struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg, + aead_alg); + return _qcrypto_cipher_ctx_init(ctx, q_alg); +} + +static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + ctx->auth_alg = QCE_HASH_SHA1_HMAC; + return rc; +} + +static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + ctx->auth_alg = QCE_HASH_SHA256_HMAC; + return rc; +} + +static int _qcrypto_cra_aead_ccm_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + ctx->auth_alg = QCE_HASH_AES_CMAC; + return rc; +} + +static int _qcrypto_cra_aead_rfc4309_ccm_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + ctx->auth_alg = QCE_HASH_AES_CMAC; + return rc; +} + +static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_priv *cp = &qcrypto_dev; + + crypto_aead_set_reqsize(tfm, 
sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + if (rc) + return rc; + ctx->cipher_aes192_fb = NULL; + ctx->ahash_aead_aes192_fb = NULL; + if (!cp->ce_support.aes_key_192) { + ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher( + "cbc(aes)", 0, 0); + if (IS_ERR(ctx->cipher_aes192_fb)) { + ctx->cipher_aes192_fb = NULL; + } else { + ctx->ahash_aead_aes192_fb = crypto_alloc_ahash( + "hmac(sha1)", 0, 0); + if (IS_ERR(ctx->ahash_aead_aes192_fb)) { + ctx->ahash_aead_aes192_fb = NULL; + crypto_free_sync_skcipher( + ctx->cipher_aes192_fb); + ctx->cipher_aes192_fb = NULL; + } + } + } + ctx->auth_alg = QCE_HASH_SHA1_HMAC; + return 0; +} + +static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm) +{ + int rc; + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_priv *cp = &qcrypto_dev; + + crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx)); + rc = _qcrypto_aead_cra_init(tfm); + if (rc) + return rc; + ctx->cipher_aes192_fb = NULL; + ctx->ahash_aead_aes192_fb = NULL; + if (!cp->ce_support.aes_key_192) { + ctx->cipher_aes192_fb = crypto_alloc_sync_skcipher( + "cbc(aes)", 0, 0); + if (IS_ERR(ctx->cipher_aes192_fb)) { + ctx->cipher_aes192_fb = NULL; + } else { + ctx->ahash_aead_aes192_fb = crypto_alloc_ahash( + "hmac(sha256)", 0, 0); + if (IS_ERR(ctx->ahash_aead_aes192_fb)) { + ctx->ahash_aead_aes192_fb = NULL; + crypto_free_sync_skcipher( + ctx->cipher_aes192_fb); + ctx->cipher_aes192_fb = NULL; + } + } + } + ctx->auth_alg = QCE_HASH_SHA256_HMAC; + return 0; +} + +static void _qcrypto_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (!list_empty(&ctx->rsp_queue)) + pr_err("_qcrypto__cra_skcipher_exit: requests still outstanding\n"); +} + +static void _qcrypto_aes_skcipher_exit(struct crypto_skcipher *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + _qcrypto_skcipher_exit(tfm); + if (ctx->cipher_aes192_fb) + crypto_free_sync_skcipher(ctx->cipher_aes192_fb); + ctx->cipher_aes192_fb = NULL; +} + +static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + if (!list_empty(&ctx->rsp_queue)) + pr_err("_qcrypto__cra_aead_exit: requests still outstanding\n"); +} + +static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + + if (!list_empty(&ctx->rsp_queue)) + pr_err("_qcrypto__cra_aead_exit: requests still outstanding\n"); + if (ctx->cipher_aes192_fb) + crypto_free_sync_skcipher(ctx->cipher_aes192_fb); + if (ctx->ahash_aead_aes192_fb) + crypto_free_ahash(ctx->ahash_aead_aes192_fb); + ctx->cipher_aes192_fb = NULL; + ctx->ahash_aead_aes192_fb = NULL; +} + +static int _disp_stats(int id) +{ + struct crypto_stat *pstat; + int len = 0; + unsigned long flags; + struct crypto_priv *cp = &qcrypto_dev; + struct crypto_engine *pe; + int i; + + pstat = &_qcrypto_stat; + len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, + "\nQTI crypto accelerator %d Statistics\n", + id + 1); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER AES encryption : %llu\n", + pstat->sk_cipher_aes_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER AES decryption : %llu\n", + pstat->sk_cipher_aes_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER DES encryption : %llu\n", + pstat->sk_cipher_des_enc); + len += scnprintf(_debug_read_buf 
+ len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER DES decryption : %llu\n", + pstat->sk_cipher_des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER 3DES encryption : %llu\n", + pstat->sk_cipher_3des_enc); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER 3DES decryption : %llu\n", + pstat->sk_cipher_3des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER operation success : %llu\n", + pstat->sk_cipher_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SK CIPHER operation fail : %llu\n", + pstat->sk_cipher_op_fail); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + "\n"); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-AES encryption : %llu\n", + pstat->aead_sha1_aes_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-AES decryption : %llu\n", + pstat->aead_sha1_aes_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-DES encryption : %llu\n", + pstat->aead_sha1_des_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-DES decryption : %llu\n", + pstat->aead_sha1_des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-3DES encryption : %llu\n", + pstat->aead_sha1_3des_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-3DES decryption : %llu\n", + pstat->aead_sha1_3des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-AES encryption : %llu\n", + pstat->aead_sha256_aes_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-AES decryption : %llu\n", + pstat->aead_sha256_aes_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-DES encryption : %llu\n", + pstat->aead_sha256_des_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-DES decryption : %llu\n", + pstat->aead_sha256_des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-3DES encryption : %llu\n", + pstat->aead_sha256_3des_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA256-3DES decryption : %llu\n", + pstat->aead_sha256_3des_dec); + + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD CCM-AES encryption : %llu\n", + pstat->aead_ccm_aes_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD CCM-AES decryption : %llu\n", + pstat->aead_ccm_aes_dec); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD RFC4309-CCM-AES encryption : %llu\n", + pstat->aead_rfc4309_ccm_aes_enc); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD RFC4309-CCM-AES decryption : %llu\n", + pstat->aead_rfc4309_ccm_aes_dec); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD operation success : %llu\n", + pstat->aead_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD operation fail : %llu\n", + pstat->aead_op_fail); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD bad message : %llu\n", + pstat->aead_bad_msg); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + "\n"); + + len += scnprintf(_debug_read_buf 
+ len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH SHA1 digest : %llu\n", + pstat->sha1_digest); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH SHA256 digest : %llu\n", + pstat->sha256_digest); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH SHA1 HMAC digest : %llu\n", + pstat->sha1_hmac_digest); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH SHA256 HMAC digest : %llu\n", + pstat->sha256_hmac_digest); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH operation success : %llu\n", + pstat->ahash_op_success); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AHASH operation fail : %llu\n", + pstat->ahash_op_fail); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n", + cp->resp_start, cp->resp_stop, + cp->max_resp_qlen, cp->max_reorder_cnt); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " max queue length, no avail : %u %u\n", + cp->max_qlen, cp->no_avail); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " work queue : %u %u %u\n", + cp->queue_work_eng3, + cp->queue_work_not_eng3, + cp->queue_work_not_eng3_nz); + len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + "\n"); + spin_lock_irqsave(&cp->lock, flags); + list_for_each_entry(pe, &cp->engine_list, elist) { + len += scnprintf( + _debug_read_buf + len, + DEBUG_MAX_RW_BUF - len - 1, + " Engine %4d Req max %d : %llu\n", + pe->unit, + pe->max_req_used, + pe->total_req + ); + len += scnprintf( + _debug_read_buf + len, + DEBUG_MAX_RW_BUF - len - 1, + " Engine %4d Req Error : %llu\n", + pe->unit, + pe->err_req + ); + qce_get_driver_stats(pe->qce); + } + spin_unlock_irqrestore(&cp->lock, flags); + + for (i = 0; i < MAX_SMP_CPU+1; i++) + if (cp->cpu_req[i]) + len += scnprintf( + _debug_read_buf + len, + DEBUG_MAX_RW_BUF - len - 1, + "CPU %d Issue Req : %d\n", + i, cp->cpu_req[i]); + return len; +} + +static void _qcrypto_remove_engine(struct crypto_engine *pengine) +{ + struct crypto_priv *cp; + struct qcrypto_alg *q_alg; + struct qcrypto_alg *n; + unsigned long flags; + struct crypto_engine *pe; + + cp = pengine->pcp; + + spin_lock_irqsave(&cp->lock, flags); + list_del(&pengine->elist); + if (pengine->first_engine) { + cp->first_engine = NULL; + pe = list_first_entry(&cp->engine_list, struct crypto_engine, + elist); + if (pe) { + pe->first_engine = true; + cp->first_engine = pe; + } + } + if (cp->next_engine == pengine) + cp->next_engine = NULL; + if (cp->scheduled_eng == pengine) + cp->scheduled_eng = NULL; + spin_unlock_irqrestore(&cp->lock, flags); + + cp->total_units--; + + cancel_work_sync(&pengine->bw_reaper_ws); + cancel_work_sync(&pengine->bw_allocate_ws); + del_timer_sync(&pengine->bw_reaper_timer); + + if (pengine->icc_path) + icc_put(pengine->icc_path); + pengine->icc_path = NULL; + + kfree_sensitive(pengine->preq_pool); + + if (cp->total_units) + return; + + list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) { + if (q_alg->alg_type == QCRYPTO_ALG_CIPHER) + crypto_unregister_skcipher(&q_alg->cipher_alg); + if (q_alg->alg_type == QCRYPTO_ALG_SHA) + crypto_unregister_ahash(&q_alg->sha_alg); + if (q_alg->alg_type == QCRYPTO_ALG_AEAD) + crypto_unregister_aead(&q_alg->aead_alg); + list_del(&q_alg->entry); + kfree_sensitive(q_alg); + } +} + +static int _qcrypto_remove(struct platform_device *pdev) +{ + struct crypto_engine *pengine; + 
struct crypto_priv *cp; + + pengine = platform_get_drvdata(pdev); + + if (!pengine) + return 0; + cp = pengine->pcp; + mutex_lock(&cp->engine_lock); + _qcrypto_remove_engine(pengine); + mutex_unlock(&cp->engine_lock); + if (pengine->qce) + qce_close(pengine->qce); + kfree_sensitive(pengine); + return 0; +} + +static int _qcrypto_check_aes_keylen(struct crypto_priv *cp, unsigned int len) +{ + switch (len) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + case AES_KEYSIZE_192: + if (cp->ce_support.aes_key_192) + break; + default: + //crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + + return 0; +} + +static int _qcrypto_setkey_aes_192_fallback(struct crypto_skcipher *tfm, + const u8 *key) +{ + //struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; + + ctx->enc_key_len = AES_KEYSIZE_192; + + crypto_sync_skcipher_clear_flags(ctx->cipher_aes192_fb, + CRYPTO_TFM_REQ_MASK); + crypto_sync_skcipher_set_flags(ctx->cipher_aes192_fb, + (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_MASK)); + + ret = crypto_sync_skcipher_setkey(ctx->cipher_aes192_fb, key, + AES_KEYSIZE_192); + /* + * TODO: delete or find equivalent in new crypto_skcipher api + if (ret) { + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; + tfm->crt_flags |= + (cipher->base.crt_flags & CRYPTO_TFM_RES_MASK); + } + */ + return ret; +} + +static int _qcrypto_setkey_aes(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + + if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) + return 0; + + if ((keylen == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) + && ctx->cipher_aes192_fb) + return _qcrypto_setkey_aes_192_fallback(tfm, key); + + if (_qcrypto_check_aes_keylen(cp, keylen)) + return -EINVAL; + + ctx->enc_key_len = keylen; + if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) { + if (key != NULL) { + memcpy(ctx->enc_key, key, keylen); + } else { + pr_err("%s Invalid key pointer\n", __func__); + return -EINVAL; + } + } + return 0; +} + +static int _qcrypto_setkey_aes_xts(struct crypto_skcipher *tfm, + const u8 *key, unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + + if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) + return 0; + if (_qcrypto_check_aes_keylen(cp, keylen/2)) + return -EINVAL; + + ctx->enc_key_len = keylen; + if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) { + if (key != NULL) { + memcpy(ctx->enc_key, key, keylen); + } else { + pr_err("%s Invalid key pointer\n", __func__); + return -EINVAL; + } + } + return 0; +} + +static int _qcrypto_setkey_des(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct des_ctx dctx; + + if (!key) { + pr_err("%s Invalid key pointer\n", __func__); + return -EINVAL; + } + if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { + pr_err("%s HW KEY usage not supported for DES algorithm\n", __func__); + return 0; + } + + if (keylen != DES_KEY_SIZE) { + //crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memset(&dctx, 0, sizeof(dctx)); + if (des_expand_key(&dctx, key, keylen) == -ENOKEY) { + if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) + return -EINVAL; + else + return 0; + } + + /* + * TODO: delete of find 
equivalent in skcipher api + if (ret) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + */ + + ctx->enc_key_len = keylen; + if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) + memcpy(ctx->enc_key, key, keylen); + + return 0; +} + +static int _qcrypto_setkey_3des(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { + pr_err("%s HW KEY usage not supported for 3DES algorithm\n", __func__); + return 0; + } + if (keylen != DES3_EDE_KEY_SIZE) { + //crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->enc_key_len = keylen; + if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) { + if (key != NULL) { + memcpy(ctx->enc_key, key, keylen); + } else { + pr_err("%s Invalid key pointer\n", __func__); + return -EINVAL; + } + } + return 0; +} + +static void seq_response(struct work_struct *work) +{ + struct crypto_priv *cp = container_of(work, struct crypto_priv, + resp_work); + struct llist_node *list; + struct llist_node *rev = NULL; + struct crypto_engine *pengine; + unsigned long flags; + int total_unit; + +again: + list = llist_del_all(&cp->ordered_resp_list); + + if (!list) + goto end; + + while (list) { + struct llist_node *t = list; + + list = llist_next(list); + t->next = rev; + rev = t; + } + + while (rev) { + struct qcrypto_resp_ctx *arsp; + struct crypto_async_request *areq; + + arsp = container_of(rev, struct qcrypto_resp_ctx, llist); + rev = llist_next(rev); + + areq = arsp->async_req; + local_bh_disable(); + areq->complete(areq, arsp->res); + local_bh_enable(); + atomic_dec(&cp->resp_cnt); + } + + if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START && + (cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS) + == STOPPED)) { + cp->resp_start++; + for (total_unit = cp->total_units; total_unit-- > 0;) { + spin_lock_irqsave(&cp->lock, flags); + pengine = _avail_eng(cp); + spin_unlock_irqrestore(&cp->lock, flags); + if (pengine) + _start_qcrypto_process(cp, pengine); + else + break; + } + } +end: + if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN, + IS_SCHEDULED) == SCHEDULE_AGAIN) + goto again; + else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED, + NOT_SCHEDULED) == SCHEDULE_AGAIN) + goto end; +} + +#define SCHEUDLE_RSP_QLEN_THRESHOLD 64 + +static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type, + void *tfm_ctx, + struct qcrypto_resp_ctx *cur_arsp, + int res) +{ + struct crypto_priv *cp = pengine->pcp; + unsigned long flags; + struct qcrypto_resp_ctx *arsp; + struct list_head *plist; + unsigned int resp_qlen; + unsigned int cnt = 0; + + switch (type) { + case CRYPTO_ALG_TYPE_AHASH: + plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue; + break; + case CRYPTO_ALG_TYPE_SKCIPHER: + case CRYPTO_ALG_TYPE_AEAD: + default: + plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue; + break; + } + + spin_lock_irqsave(&cp->lock, flags); + + cur_arsp->res = res; + while (!list_empty(plist)) { + arsp = list_first_entry(plist, + struct qcrypto_resp_ctx, list); + if (arsp->res == -EINPROGRESS) + break; + list_del(&arsp->list); + llist_add(&arsp->llist, &cp->ordered_resp_list); + atomic_inc(&cp->resp_cnt); + cnt++; + } + resp_qlen = atomic_read(&cp->resp_cnt); + if (resp_qlen > cp->max_resp_qlen) + cp->max_resp_qlen = resp_qlen; + if (cnt > cp->max_reorder_cnt) + cp->max_reorder_cnt = cnt; + if 
((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) && + cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS, + STOPPED) == IN_PROGRESS) { + cp->resp_stop++; + } + + spin_unlock_irqrestore(&cp->lock, flags); + +retry: + if (!llist_empty(&cp->ordered_resp_list)) { + unsigned int cpu; + + if (pengine->first_engine) { + cpu = WORK_CPU_UNBOUND; + cp->queue_work_eng3++; + } else { + cp->queue_work_not_eng3++; + cpu = cp->cpu_getting_irqs_frm_first_ce; + /* + * If source not the first engine, and there + * are outstanding requests going on first engine, + * skip scheduling of work queue to anticipate + * more may be coming. If the response queue + * length exceeds threshold, to avoid further + * delay, schedule work queue immediately. + */ + if (cp->first_engine && atomic_read( + &cp->first_engine->req_count)) { + if (resp_qlen < SCHEUDLE_RSP_QLEN_THRESHOLD) + return; + cp->queue_work_not_eng3_nz++; + } + } + if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED, + IS_SCHEDULED) == NOT_SCHEDULED) + queue_work_on(cpu, cp->resp_wq, &cp->resp_work); + else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED, + SCHEDULE_AGAIN) == NOT_SCHEDULED) + goto retry; + } +} + +static void req_done(struct qcrypto_req_control *pqcrypto_req_control) +{ + struct crypto_engine *pengine; + struct crypto_async_request *areq; + struct crypto_priv *cp; + struct qcrypto_resp_ctx *arsp; + u32 type = 0; + void *tfm_ctx = NULL; + unsigned int cpu; + int res; + + pengine = pqcrypto_req_control->pce; + cp = pengine->pcp; + areq = pqcrypto_req_control->req; + arsp = pqcrypto_req_control->arsp; + res = pqcrypto_req_control->res; + qcrypto_free_req_control(pengine, pqcrypto_req_control); + + if (areq) { + type = crypto_tfm_alg_type(areq->tfm); + tfm_ctx = crypto_tfm_ctx(areq->tfm); + } + cpu = smp_processor_id(); + pengine->irq_cpu = cpu; + if (pengine->first_engine) { + if (cpu != cp->cpu_getting_irqs_frm_first_ce) + cp->cpu_getting_irqs_frm_first_ce = cpu; + } + if (areq) + _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res); + if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS) + _start_qcrypto_process(cp, pengine); +} + +static void _qce_ahash_complete(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret) +{ + struct ahash_request *areq = (struct ahash_request *) cookie; + struct crypto_async_request *async_req; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + uint32_t diglen = crypto_ahash_digestsize(ahash); + uint32_t *auth32 = (uint32_t *)authdata; + struct crypto_engine *pengine; + struct qcrypto_req_control *pqcrypto_req_control; + + async_req = &areq->base; + pstat = &_qcrypto_stat; + + pengine = rctx->pengine; + pqcrypto_req_control = find_req_control_for_areq(pengine, + async_req); + if (pqcrypto_req_control == NULL) { + pr_err("async request not found\n"); + return; + } + +#ifdef QCRYPTO_DEBUG + dev_info(&pengine->pdev->dev, "%s: %pK ret %d\n", + __func__, areq, ret); +#endif + if (digest) { + memcpy(rctx->digest, digest, diglen); + if (rctx->last_blk) + memcpy(areq->result, digest, diglen); + } + if (authdata) { + rctx->byte_count[0] = auth32[0]; + rctx->byte_count[1] = auth32[1]; + rctx->byte_count[2] = auth32[2]; + rctx->byte_count[3] = auth32[3]; + } + areq->src = rctx->src; + areq->nbytes = rctx->nbytes; + + rctx->last_blk = 0; + rctx->first_blk = 0; + + if (ret) { + 
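+		/* engine reported an error: record -ENXIO and count the failure */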
pqcrypto_req_control->res = -ENXIO; + pstat->ahash_op_fail++; + } else { + pqcrypto_req_control->res = 0; + pstat->ahash_op_success++; + } + if (cp->ce_support.aligned_only) { + areq->src = rctx->orig_src; + kfree(rctx->data); + } + req_done(pqcrypto_req_control); +} + +static void _qce_sk_cipher_complete(void *cookie, unsigned char *icb, + unsigned char *iv, int ret) +{ + struct skcipher_request *areq = (struct skcipher_request *) cookie; + struct crypto_async_request *async_req; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_engine *pengine; + struct qcrypto_req_control *pqcrypto_req_control; + + async_req = &areq->base; + pstat = &_qcrypto_stat; + rctx = skcipher_request_ctx(areq); + pengine = rctx->pengine; + pqcrypto_req_control = find_req_control_for_areq(pengine, + async_req); + if (pqcrypto_req_control == NULL) { + pr_err("async request not found\n"); + return; + } + +#ifdef QCRYPTO_DEBUG + dev_info(&pengine->pdev->dev, "%s: %pK ret %d\n", + __func__, areq, ret); +#endif + if (iv) + memcpy(ctx->iv, iv, crypto_skcipher_ivsize(tfm)); + + if (ret) { + pqcrypto_req_control->res = -ENXIO; + pstat->sk_cipher_op_fail++; + } else { + pqcrypto_req_control->res = 0; + pstat->sk_cipher_op_success++; + } + + if (cp->ce_support.aligned_only) { + struct qcrypto_cipher_req_ctx *rctx; + uint32_t num_sg = 0; + uint32_t bytes = 0; + + rctx = skcipher_request_ctx(areq); + areq->src = rctx->orig_src; + areq->dst = rctx->orig_dst; + + num_sg = qcrypto_count_sg(areq->dst, areq->cryptlen); + bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg, + rctx->data, areq->cryptlen); + if (bytes != areq->cryptlen) + pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", + bytes, areq->cryptlen); + kfree_sensitive(rctx->data); + } + req_done(pqcrypto_req_control); +} + +static void _qce_aead_complete(void *cookie, unsigned char *icv, + unsigned char *iv, int ret) +{ + struct aead_request *areq = (struct aead_request *) cookie; + struct crypto_async_request *async_req; + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + struct crypto_engine *pengine; + struct qcrypto_req_control *pqcrypto_req_control; + + async_req = &areq->base; + pstat = &_qcrypto_stat; + rctx = aead_request_ctx(areq); + pengine = rctx->pengine; + pqcrypto_req_control = find_req_control_for_areq(pengine, + async_req); + if (pqcrypto_req_control == NULL) { + pr_err("async request not found\n"); + return; + } + + if (rctx->mode == QCE_MODE_CCM) { + kfree_sensitive(rctx->adata); + } else { + uint32_t ivsize = crypto_aead_ivsize(aead); + + if (ret == 0) { + if (rctx->dir == QCE_ENCRYPT) { + /* copy the icv to dst */ + scatterwalk_map_and_copy(icv, areq->dst, + areq->cryptlen + areq->assoclen, + ctx->authsize, 1); + + } else { + unsigned char tmp[SHA256_DIGESTSIZE] = {0}; + + /* compare icv from src */ + scatterwalk_map_and_copy(tmp, + areq->src, areq->assoclen + + areq->cryptlen - ctx->authsize, + ctx->authsize, 0); + ret = memcmp(icv, tmp, ctx->authsize); + if (ret != 0) + ret = -EBADMSG; + + } + } else { + ret = -ENXIO; + } + + if (iv) + memcpy(ctx->iv, iv, ivsize); + } + + if (ret == (-EBADMSG)) + pstat->aead_bad_msg++; + else if (ret) + pstat->aead_op_fail++; + else + pstat->aead_op_success++; + + 
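	/* hand the final status back and complete the request */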
pqcrypto_req_control->res = ret; + req_done(pqcrypto_req_control); +} + +static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq, uint32_t assoclen) +{ + unsigned int i = ((unsigned int)qreq->iv[0]) + 1; + + memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize); + /* + * Format control info per RFC 3610 and + * NIST Special Publication 800-38C + */ + qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2)); + if (assoclen) + qreq->nonce[0] |= 64; + + if (i > MAX_NONCE) + return -EINVAL; + + return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i); +} + +static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen, + struct scatterlist *sg, unsigned char *adata) +{ + uint32_t len; + uint32_t bytes = 0; + uint32_t num_sg = 0; + + /* + * Add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (alen < 65280) { + *(__be16 *)adata = cpu_to_be16(alen); + len = 2; + } else { + if ((alen >= 65280) && (alen <= 0xffffffff)) { + *(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(alen); + len = 6; + } else { + *(__be16 *)adata = cpu_to_be16(0xffff); + *(__be32 *)&adata[6] = cpu_to_be32(alen); + len = 10; + } + } + adata += len; + qreq->assoclen = ALIGN((alen + len), 16); + + num_sg = qcrypto_count_sg(sg, alen); + bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen); + if (bytes != alen) + pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes, alen); + + return 0; +} + +static int _qcrypto_process_skcipher(struct crypto_engine *pengine, + struct qcrypto_req_control *pqcrypto_req_control) +{ + struct crypto_async_request *async_req; + struct qce_req qreq; + int ret; + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *cipher_ctx; + struct skcipher_request *req; + struct crypto_skcipher *tfm; + + async_req = pqcrypto_req_control->req; + req = container_of(async_req, struct skcipher_request, base); + cipher_ctx = crypto_tfm_ctx(async_req->tfm); + rctx = skcipher_request_ctx(req); + rctx->pengine = pengine; + tfm = crypto_skcipher_reqtfm(req); + if (pengine->pcp->ce_support.aligned_only) { + uint32_t bytes = 0; + uint32_t num_sg = 0; + + rctx->orig_src = req->src; + rctx->orig_dst = req->dst; + rctx->data = kzalloc((req->cryptlen + 64), GFP_ATOMIC); + if (rctx->data == NULL) + return -ENOMEM; + num_sg = qcrypto_count_sg(req->src, req->cryptlen); + bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data, + req->cryptlen); + if (bytes != req->cryptlen) + pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", + bytes, req->cryptlen); + sg_set_buf(&rctx->dsg, rctx->data, req->cryptlen); + sg_mark_end(&rctx->dsg); + rctx->iv = req->iv; + + req->src = &rctx->dsg; + req->dst = &rctx->dsg; + } + qreq.op = QCE_REQ_ABLK_CIPHER; //TODO: change name in qcedev.h + qreq.qce_cb = _qce_sk_cipher_complete; + qreq.areq = req; + qreq.alg = rctx->alg; + qreq.dir = rctx->dir; + qreq.mode = rctx->mode; + qreq.enckey = cipher_ctx->enc_key; + qreq.encklen = cipher_ctx->enc_key_len; + qreq.iv = req->iv; + qreq.ivsize = crypto_skcipher_ivsize(tfm); + qreq.cryptlen = req->cryptlen; + qreq.use_pmem = 0; + qreq.flags = cipher_ctx->flags; + + if ((cipher_ctx->enc_key_len == 0) && + 
(pengine->pcp->platform_support.hw_key_support == 0)) + ret = -EINVAL; + else + ret = qce_ablk_cipher_req(pengine->qce, &qreq); //maybe change name? + + return ret; +} + +static int _qcrypto_process_ahash(struct crypto_engine *pengine, + struct qcrypto_req_control *pqcrypto_req_control) +{ + struct crypto_async_request *async_req; + struct ahash_request *req; + struct qce_sha_req sreq; + struct qcrypto_sha_req_ctx *rctx; + struct qcrypto_sha_ctx *sha_ctx; + int ret = 0; + + async_req = pqcrypto_req_control->req; + req = container_of(async_req, + struct ahash_request, base); + rctx = ahash_request_ctx(req); + sha_ctx = crypto_tfm_ctx(async_req->tfm); + rctx->pengine = pengine; + + sreq.qce_cb = _qce_ahash_complete; + sreq.digest = &rctx->digest[0]; + sreq.src = req->src; + sreq.auth_data[0] = rctx->byte_count[0]; + sreq.auth_data[1] = rctx->byte_count[1]; + sreq.auth_data[2] = rctx->byte_count[2]; + sreq.auth_data[3] = rctx->byte_count[3]; + sreq.first_blk = rctx->first_blk; + sreq.last_blk = rctx->last_blk; + sreq.size = req->nbytes; + sreq.areq = req; + sreq.flags = sha_ctx->flags; + + switch (sha_ctx->alg) { + case QCE_HASH_SHA1: + sreq.alg = QCE_HASH_SHA1; + sreq.authkey = NULL; + break; + case QCE_HASH_SHA256: + sreq.alg = QCE_HASH_SHA256; + sreq.authkey = NULL; + break; + case QCE_HASH_SHA1_HMAC: + sreq.alg = QCE_HASH_SHA1_HMAC; + sreq.authkey = &sha_ctx->authkey[0]; + sreq.authklen = SHA_HMAC_KEY_SIZE; + break; + case QCE_HASH_SHA256_HMAC: + sreq.alg = QCE_HASH_SHA256_HMAC; + sreq.authkey = &sha_ctx->authkey[0]; + sreq.authklen = SHA_HMAC_KEY_SIZE; + break; + default: + pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg); + ret = -1; + break; + } + ret = qce_process_sha_req(pengine->qce, &sreq); + + return ret; +} + +static int _qcrypto_process_aead(struct crypto_engine *pengine, + struct qcrypto_req_control *pqcrypto_req_control) +{ + struct crypto_async_request *async_req; + struct qce_req qreq; + int ret = 0; + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *cipher_ctx; + struct aead_request *req; + struct crypto_aead *aead; + + async_req = pqcrypto_req_control->req; + req = container_of(async_req, struct aead_request, base); + aead = crypto_aead_reqtfm(req); + rctx = aead_request_ctx(req); + rctx->pengine = pengine; + cipher_ctx = crypto_tfm_ctx(async_req->tfm); + + qreq.op = QCE_REQ_AEAD; + qreq.qce_cb = _qce_aead_complete; + + qreq.areq = req; + qreq.alg = rctx->alg; + qreq.dir = rctx->dir; + qreq.mode = rctx->mode; + qreq.iv = rctx->iv; + + qreq.enckey = cipher_ctx->enc_key; + qreq.encklen = cipher_ctx->enc_key_len; + qreq.authkey = cipher_ctx->auth_key; + qreq.authklen = cipher_ctx->auth_key_len; + qreq.authsize = crypto_aead_authsize(aead); + qreq.auth_alg = cipher_ctx->auth_alg; + if (qreq.mode == QCE_MODE_CCM) + qreq.ivsize = AES_BLOCK_SIZE; + else + qreq.ivsize = crypto_aead_ivsize(aead); + qreq.flags = cipher_ctx->flags; + + if (qreq.mode == QCE_MODE_CCM) { + uint32_t assoclen; + + if (qreq.dir == QCE_ENCRYPT) + qreq.cryptlen = req->cryptlen; + else + qreq.cryptlen = req->cryptlen - + qreq.authsize; + + /* if rfc4309 ccm, adjust assoclen */ + assoclen = req->assoclen; + if (rctx->ccmtype) + assoclen -= 8; + /* Get NONCE */ + ret = qccrypto_set_aead_ccm_nonce(&qreq, assoclen); + if (ret) + return ret; + + if (assoclen) { + rctx->adata = kzalloc((assoclen + 0x64), + GFP_ATOMIC); + if (!rctx->adata) + return -ENOMEM; + /* Format Associated data */ + ret = qcrypto_aead_ccm_format_adata(&qreq, + assoclen, + req->src, + rctx->adata); + } else { + 
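+			/* no associated data: nothing to format */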
qreq.assoclen = 0; + rctx->adata = NULL; + } + if (ret) { + kfree_sensitive(rctx->adata); + return ret; + } + + /* + * update req with new formatted associated + * data info + */ + qreq.asg = &rctx->asg; + if (rctx->adata) + sg_set_buf(qreq.asg, rctx->adata, + qreq.assoclen); + sg_mark_end(qreq.asg); + } + ret = qce_aead_req(pengine->qce, &qreq); + + return ret; +} + +static struct crypto_engine *_qcrypto_static_assign_engine( + struct crypto_priv *cp) +{ + struct crypto_engine *pengine; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + if (cp->next_engine) + pengine = cp->next_engine; + else + pengine = list_first_entry(&cp->engine_list, + struct crypto_engine, elist); + + if (list_is_last(&pengine->elist, &cp->engine_list)) + cp->next_engine = list_first_entry( + &cp->engine_list, struct crypto_engine, elist); + else + cp->next_engine = list_next_entry(pengine, elist); + spin_unlock_irqrestore(&cp->lock, flags); + return pengine; +} + +static int _start_qcrypto_process(struct crypto_priv *cp, + struct crypto_engine *pengine) +{ + struct crypto_async_request *async_req = NULL; + struct crypto_async_request *backlog_eng = NULL; + struct crypto_async_request *backlog_cp = NULL; + unsigned long flags; + u32 type; + int ret = 0; + struct crypto_stat *pstat; + void *tfm_ctx; + struct qcrypto_cipher_req_ctx *cipher_rctx; + struct qcrypto_sha_req_ctx *ahash_rctx; + struct skcipher_request *skcipher_req; + struct ahash_request *ahash_req; + struct aead_request *aead_req; + struct qcrypto_resp_ctx *arsp; + struct qcrypto_req_control *pqcrypto_req_control; + unsigned int cpu = MAX_SMP_CPU; + + if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED) + return 0; + + if (in_interrupt()) { + cpu = smp_processor_id(); + if (cpu >= MAX_SMP_CPU) + cpu = MAX_SMP_CPU - 1; + } else + cpu = MAX_SMP_CPU; + + pstat = &_qcrypto_stat; + +again: + spin_lock_irqsave(&cp->lock, flags); + if (pengine->issue_req || + atomic_read(&pengine->req_count) >= (pengine->max_req)) { + spin_unlock_irqrestore(&cp->lock, flags); + return 0; + } + + backlog_eng = crypto_get_backlog(&pengine->req_queue); + + /* make sure it is in high bandwidth state */ + if (pengine->bw_state != BUS_HAS_BANDWIDTH) { + spin_unlock_irqrestore(&cp->lock, flags); + return 0; + } + + /* try to get request from request queue of the engine first */ + async_req = crypto_dequeue_request(&pengine->req_queue); + if (!async_req) { + /* + * if no request from the engine, + * try to get from request queue of driver + */ + backlog_cp = crypto_get_backlog(&cp->req_queue); + async_req = crypto_dequeue_request(&cp->req_queue); + if (!async_req) { + spin_unlock_irqrestore(&cp->lock, flags); + return 0; + } + } + pqcrypto_req_control = qcrypto_alloc_req_control(pengine); + if (pqcrypto_req_control == NULL) { + pr_err("Allocation of request failed\n"); + spin_unlock_irqrestore(&cp->lock, flags); + return 0; + } + + /* add associated rsp entry to tfm response queue */ + type = crypto_tfm_alg_type(async_req->tfm); + tfm_ctx = crypto_tfm_ctx(async_req->tfm); + switch (type) { + case CRYPTO_ALG_TYPE_AHASH: + ahash_req = container_of(async_req, + struct ahash_request, base); + ahash_rctx = ahash_request_ctx(ahash_req); + arsp = &ahash_rctx->rsp_entry; + list_add_tail( + &arsp->list, + &((struct qcrypto_sha_ctx *)tfm_ctx) + ->rsp_queue); + break; + case CRYPTO_ALG_TYPE_SKCIPHER: + skcipher_req = container_of(async_req, + struct skcipher_request, base); + cipher_rctx = skcipher_request_ctx(skcipher_req); + arsp = &cipher_rctx->rsp_entry; + list_add_tail( + 
&arsp->list, + &((struct qcrypto_cipher_ctx *)tfm_ctx) + ->rsp_queue); + break; + case CRYPTO_ALG_TYPE_AEAD: + default: + aead_req = container_of(async_req, + struct aead_request, base); + cipher_rctx = aead_request_ctx(aead_req); + arsp = &cipher_rctx->rsp_entry; + list_add_tail( + &arsp->list, + &((struct qcrypto_cipher_ctx *)tfm_ctx) + ->rsp_queue); + break; + } + + arsp->res = -EINPROGRESS; + arsp->async_req = async_req; + pqcrypto_req_control->pce = pengine; + pqcrypto_req_control->req = async_req; + pqcrypto_req_control->arsp = arsp; + pengine->active_seq++; + pengine->check_flag = true; + + pengine->issue_req = true; + cp->cpu_req[cpu]++; + smp_mb(); /* make it visible */ + + spin_unlock_irqrestore(&cp->lock, flags); + if (backlog_eng) + backlog_eng->complete(backlog_eng, -EINPROGRESS); + if (backlog_cp) + backlog_cp->complete(backlog_cp, -EINPROGRESS); + switch (type) { + case CRYPTO_ALG_TYPE_SKCIPHER: + ret = _qcrypto_process_skcipher(pengine, pqcrypto_req_control); + break; + case CRYPTO_ALG_TYPE_AHASH: + ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control); + break; + case CRYPTO_ALG_TYPE_AEAD: + ret = _qcrypto_process_aead(pengine, pqcrypto_req_control); + break; + default: + ret = -EINVAL; + } + + pengine->issue_req = false; + smp_mb(); /* make it visible */ + + pengine->total_req++; + if (ret) { + pengine->err_req++; + qcrypto_free_req_control(pengine, pqcrypto_req_control); + + if (type == CRYPTO_ALG_TYPE_SKCIPHER) + pstat->sk_cipher_op_fail++; + else + if (type == CRYPTO_ALG_TYPE_AHASH) + pstat->ahash_op_fail++; + else + pstat->aead_op_fail++; + + _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret); + goto again; + } + return ret; +} + +static inline struct crypto_engine *_next_eng(struct crypto_priv *cp, + struct crypto_engine *p) +{ + + if (p == NULL || list_is_last(&p->elist, &cp->engine_list)) + p = list_first_entry(&cp->engine_list, struct crypto_engine, + elist); + else + p = list_entry(p->elist.next, struct crypto_engine, elist); + return p; +} +static struct crypto_engine *_avail_eng(struct crypto_priv *cp) +{ + /* call this function with spinlock set */ + struct crypto_engine *q = NULL; + struct crypto_engine *p = cp->scheduled_eng; + struct crypto_engine *q1; + int eng_cnt = cp->total_units; + + if (unlikely(list_empty(&cp->engine_list))) { + pr_err("%s: no valid ce to schedule\n", __func__); + return NULL; + } + + p = _next_eng(cp, p); + q1 = p; + while (eng_cnt-- > 0) { + if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) { + q = p; + break; + } + p = _next_eng(cp, p); + if (q1 == p) + break; + } + cp->scheduled_eng = q; + return q; +} + +static int _qcrypto_queue_req(struct crypto_priv *cp, + struct crypto_engine *pengine, + struct crypto_async_request *req) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + + if (pengine) { + ret = crypto_enqueue_request(&pengine->req_queue, req); + } else { + ret = crypto_enqueue_request(&cp->req_queue, req); + pengine = _avail_eng(cp); + if (cp->req_queue.qlen > cp->max_qlen) + cp->max_qlen = cp->req_queue.qlen; + } + if (pengine) { + switch (pengine->bw_state) { + case BUS_NO_BANDWIDTH: + if (!pengine->high_bw_req) { + qcrypto_ce_bw_allocate_req(pengine); + pengine->high_bw_req = true; + } + pengine = NULL; + break; + case BUS_HAS_BANDWIDTH: + break; + case BUS_BANDWIDTH_RELEASING: + pengine->high_bw_req = true; + pengine = NULL; + break; + case BUS_BANDWIDTH_ALLOCATING: + pengine = NULL; + break; + case BUS_SUSPENDED: + case BUS_SUSPENDING: + default: + pengine = NULL; + 
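+ /* Bus suspended or suspending: do not dispatch now; the request stays queued and is only issued once the engine is back in BUS_HAS_BANDWIDTH */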
break; + } + } else { + cp->no_avail++; + } + spin_unlock_irqrestore(&cp->lock, flags); + if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)) + _start_qcrypto_process(cp, pengine); + return ret; +} + +static int _qcrypto_enc_aes_192_fallback(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb); + skcipher_request_set_sync_tfm(subreq, ctx->cipher_aes192_fb); + + skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + err = crypto_skcipher_encrypt(subreq); + skcipher_request_zero(subreq); + return err; +} + +static int _qcrypto_dec_aes_192_fallback(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + int err; + + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb); + skcipher_request_set_sync_tfm(subreq, ctx->cipher_aes192_fb); + + skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); + skcipher_request_set_crypt(subreq, req->src, req->dst, + req->cryptlen, req->iv); + err = crypto_skcipher_decrypt(subreq); + skcipher_request_zero(subreq); + return err; +} + + +static int _qcrypto_enc_aes_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_enc_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_aes_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_enc_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_aes_ctr(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + 
dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_enc_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CTR; + + pstat->sk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_aes_xts(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat = &_qcrypto_stat; + struct crypto_priv *cp = ctx->cp; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_XTS; + + pstat->sk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1)) + return -EINVAL; + if ((ctx->auth_key_len != AES_KEYSIZE_128) && + (ctx->auth_key_len != AES_KEYSIZE_256)) + return -EINVAL; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CCM; + rctx->iv = req->iv; + rctx->ccmtype = 0; + + pstat->aead_ccm_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CCM; + memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv)); + rctx->rfc4309_iv[0] = 3; /* L -1 */ + memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3); + memcpy(&rctx->rfc4309_iv[4], req->iv, 8); + rctx->ccmtype = 1; + rctx->iv = rctx->rfc4309_iv; + pstat->aead_rfc4309_ccm_aes_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_des_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_des_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct 
crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_3des_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_3des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_enc_3des_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_3des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_aes_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_dec_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_aes_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_dec_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_aes_ctr(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx 
*rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + if ((ctx->enc_key_len == AES_KEYSIZE_192) && + (!cp->ce_support.aes_key_192) && + ctx->cipher_aes192_fb) + return _qcrypto_dec_aes_192_fallback(req); + + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->mode = QCE_MODE_CTR; + + /* Note. There is no such thing as aes/counter mode, decrypt */ + rctx->dir = QCE_ENCRYPT; + + pstat->sk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_des_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_des_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_3des_ecb(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->sk_cipher_3des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_3des_cbc(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) != CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->sk_cipher_3des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_dec_aes_xts(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat = &_qcrypto_stat; + + WARN_ON(crypto_tfm_alg_type(&tfm->base) 
!= CRYPTO_ALG_TYPE_SKCIPHER); + rctx = skcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->mode = QCE_MODE_XTS; + rctx->dir = QCE_DECRYPT; + + pstat->sk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1)) + return -EINVAL; + if ((ctx->auth_key_len != AES_KEYSIZE_128) && + (ctx->auth_key_len != AES_KEYSIZE_256)) + return -EINVAL; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CCM; + rctx->iv = req->iv; + rctx->ccmtype = 0; + + pstat->aead_ccm_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + if (req->assoclen != 16 && req->assoclen != 20) + return -EINVAL; + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CCM; + memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv)); + rctx->rfc4309_iv[0] = 3; /* L -1 */ + memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3); + memcpy(&rctx->rfc4309_iv[4], req->iv, 8); + rctx->ccmtype = 1; + rctx->iv = rctx->rfc4309_iv; + pstat->aead_rfc4309_ccm_aes_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + return 0; +} + +static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc); + + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + ctx->authsize = authsize; + return 0; +} + +static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc); + + switch (authsize) { + case 8: + case 12: + case 16: + break; + default: + return -EINVAL; + } + ctx->authsize = authsize; + return 0; +} + +static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + struct rtattr *rta = (struct rtattr *)key; + struct crypto_authenc_key_param *param; + int ret; + + if (!RTA_OK(rta, keylen)) + goto badkey; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + goto badkey; + if (RTA_PAYLOAD(rta) < sizeof(*param)) + goto badkey; + + param = RTA_DATA(rta); + ctx->enc_key_len = be32_to_cpu(param->enckeylen); + + key += RTA_ALIGN(rta->rta_len); + keylen -= RTA_ALIGN(rta->rta_len); + + if (keylen < ctx->enc_key_len) + goto badkey; + + ctx->auth_key_len = keylen - ctx->enc_key_len; + if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE || + ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE) + goto badkey; + memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE); + memcpy(ctx->enc_key, 
key + ctx->auth_key_len, ctx->enc_key_len); + memcpy(ctx->auth_key, key, ctx->auth_key_len); + + if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && + ctx->ahash_aead_aes192_fb) { + crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0); + ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb, + ctx->auth_key, ctx->auth_key_len); + if (ret) + goto badkey; + crypto_sync_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0); + ret = crypto_sync_skcipher_setkey(ctx->cipher_aes192_fb, + ctx->enc_key, ctx->enc_key_len); + if (ret) + goto badkey; + } + + return 0; +badkey: + ctx->enc_key_len = 0; + //crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + case AES_KEYSIZE_192: + if (cp->ce_support.aes_key_192) + break; + default: + ctx->enc_key_len = 0; + //crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + ctx->enc_key_len = keylen; + memcpy(ctx->enc_key, key, keylen); + ctx->auth_key_len = keylen; + memcpy(ctx->auth_key, key, keylen); + + return 0; +} + +static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead, + const u8 *key, unsigned int key_len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + int ret; + + if (key_len < QCRYPTO_CCM4309_NONCE_LEN) + return -EINVAL; + key_len -= QCRYPTO_CCM4309_NONCE_LEN; + memcpy(ctx->ccm4309_nonce, key + key_len, QCRYPTO_CCM4309_NONCE_LEN); + ret = _qcrypto_aead_ccm_setkey(aead, key, key_len); + return ret; +} + +static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx, + int res) +{ + struct aead_request *req; + struct crypto_async_request *areq; + + req = rctx->aead_req; + areq = &req->base; + if (rctx->fb_aes_req) + skcipher_request_free(rctx->fb_aes_req); + if (rctx->fb_hash_req) + ahash_request_free(rctx->fb_hash_req); + rctx->fb_aes_req = NULL; + rctx->fb_hash_req = NULL; + kfree(rctx->fb_aes_iv); + areq->complete(areq, res); +} + +static void _aead_aes_fb_stage2_ahash_complete( + struct crypto_async_request *base, int err) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct aead_request *req; + struct qcrypto_cipher_ctx *ctx; + + rctx = base->data; + req = rctx->aead_req; + ctx = crypto_tfm_ctx(req->base.tfm); + /* copy icv */ + if (err == 0) + scatterwalk_map_and_copy(rctx->fb_ahash_digest, + rctx->fb_aes_dst, + req->cryptlen, + ctx->authsize, 1); + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); +} + + +static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx) +{ + struct ahash_request *ahash_req; + + ahash_req = rctx->fb_hash_req; + ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG, + _aead_aes_fb_stage2_ahash_complete, rctx); + + return crypto_ahash_digest(ahash_req); +} + +static void _aead_aes_fb_stage2_decrypt_complete( + struct crypto_async_request *base, int err) +{ + struct qcrypto_cipher_req_ctx *rctx; + + rctx = base->data; + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); +} + +static int _start_aead_aes_fb_stage2_decrypt( + struct qcrypto_cipher_req_ctx *rctx) +{ + struct skcipher_request *aes_req; + + aes_req = rctx->fb_aes_req; + skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG, + _aead_aes_fb_stage2_decrypt_complete, 
rctx); + return crypto_skcipher_decrypt(aes_req); +} + +static void _aead_aes_fb_stage1_ahash_complete( + struct crypto_async_request *base, int err) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct aead_request *req; + struct qcrypto_cipher_ctx *ctx; + + rctx = base->data; + req = rctx->aead_req; + ctx = crypto_tfm_ctx(req->base.tfm); + + /* compare icv */ + if (err == 0) { + unsigned char *tmp; + + tmp = kmalloc(ctx->authsize, GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto ret; + } + scatterwalk_map_and_copy(tmp, rctx->fb_aes_src, + req->cryptlen - ctx->authsize, ctx->authsize, 0); + if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0) + err = -EBADMSG; + kfree(tmp); + } +ret: + if (err) + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); + else { + err = _start_aead_aes_fb_stage2_decrypt(rctx); + if (err != -EINPROGRESS && err != -EBUSY) + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); + } + +} + +static void _aead_aes_fb_stage1_encrypt_complete( + struct crypto_async_request *base, int err) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct aead_request *req; + struct qcrypto_cipher_ctx *ctx; + + rctx = base->data; + req = rctx->aead_req; + ctx = crypto_tfm_ctx(req->base.tfm); + + memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize); + + if (err) { + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); + return; + } + + err = _start_aead_aes_fb_stage2_hmac(rctx); + + /* copy icv */ + if (err == 0) { + scatterwalk_map_and_copy(rctx->fb_ahash_digest, + rctx->fb_aes_dst, + req->cryptlen, + ctx->authsize, 1); + } + if (err != -EINPROGRESS && err != -EBUSY) + _qcrypto_aead_aes_192_fb_a_cb(rctx, err); +} + +static int _qcrypto_aead_aes_192_fallback(struct aead_request *req, + bool is_encrypt) +{ + int rc = -EINVAL; + struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req); + struct skcipher_request *aes_req = NULL; + struct ahash_request *ahash_req = NULL; + int nbytes; + struct scatterlist *src, *dst; + + rctx->fb_aes_iv = NULL; + aes_req = skcipher_request_alloc(&ctx->cipher_aes192_fb->base, + GFP_KERNEL); + if (!aes_req) + return -ENOMEM; + ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL); + if (!ahash_req) + goto ret; + rctx->fb_aes_req = aes_req; + rctx->fb_hash_req = ahash_req; + rctx->aead_req = req; + /* assoc and iv are sitting in the beginning of src sg list */ + /* Similarly, assoc and iv are sitting in the beginning of dst list */ + src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src, + req->assoclen); + dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst, + req->assoclen); + + nbytes = req->cryptlen; + if (!is_encrypt) + nbytes -= ctx->authsize; + rctx->fb_ahash_length = nbytes + req->assoclen; + rctx->fb_aes_src = src; + rctx->fb_aes_dst = dst; + rctx->fb_aes_cryptlen = nbytes; + rctx->ivsize = crypto_aead_ivsize(aead_tfm); + rctx->fb_aes_iv = kmemdup(req->iv, rctx->ivsize, GFP_ATOMIC); + if (!rctx->fb_aes_iv) + goto ret; + skcipher_request_set_crypt(aes_req, rctx->fb_aes_src, + rctx->fb_aes_dst, + rctx->fb_aes_cryptlen, rctx->fb_aes_iv); + if (is_encrypt) + ahash_request_set_crypt(ahash_req, req->dst, + rctx->fb_ahash_digest, + rctx->fb_ahash_length); + else + ahash_request_set_crypt(ahash_req, req->src, + rctx->fb_ahash_digest, + rctx->fb_ahash_length); + + if (is_encrypt) { + + skcipher_request_set_callback(aes_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _aead_aes_fb_stage1_encrypt_complete, rctx); + + rc = 
crypto_skcipher_encrypt(aes_req); + if (rc == 0) { + memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize); + rc = _start_aead_aes_fb_stage2_hmac(rctx); + if (rc == 0) { + /* copy icv */ + scatterwalk_map_and_copy(rctx->fb_ahash_digest, + dst, + req->cryptlen, + ctx->authsize, 1); + } + } + if (rc == -EINPROGRESS || rc == -EBUSY) + return rc; + goto ret; + + } else { + ahash_request_set_callback(ahash_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _aead_aes_fb_stage1_ahash_complete, rctx); + + rc = crypto_ahash_digest(ahash_req); + if (rc == 0) { + unsigned char *tmp; + + tmp = kmalloc(ctx->authsize, GFP_KERNEL); + if (!tmp) { + rc = -ENOMEM; + goto ret; + } + /* compare icv */ + scatterwalk_map_and_copy(tmp, + src, req->cryptlen - ctx->authsize, + ctx->authsize, 0); + if (memcmp(rctx->fb_ahash_digest, tmp, + ctx->authsize) != 0) + rc = -EBADMSG; + else + rc = _start_aead_aes_fb_stage2_decrypt(rctx); + kfree(tmp); + } + if (rc == -EINPROGRESS || rc == -EBUSY) + return rc; + goto ret; + } +ret: + if (aes_req) + skcipher_request_free(aes_req); + if (ahash_req) + ahash_request_free(ahash_req); + kfree(rctx->fb_aes_iv); + return rc; +} + +static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + rctx->aead_req = req; + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_aes_enc++; + else + pstat->aead_sha256_aes_enc++; + if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && + ctx->ahash_aead_aes192_fb) + return _qcrypto_aead_aes_192_fallback(req, true); + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + +#ifdef QCRYPTO_DEBUG + dev_info(&ctx->pengine->pdev->dev, "%s: %pK\n", __func__, req); +#endif + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + rctx->aead_req = req; + + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_aes_dec++; + else + pstat->aead_sha256_aes_dec++; + + if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && + ctx->ahash_aead_aes192_fb) + return _qcrypto_aead_aes_192_fallback(req, false); + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_des_enc++; + else + pstat->aead_sha256_des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_decrypt_des_cbc(struct 
aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_des_dec++; + else + pstat->aead_sha256_des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_3des_enc++; + else + pstat->aead_sha256_3des_enc++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) + pstat->aead_sha1_3des_dec++; + else + pstat->aead_sha256_3des_dec++; + return _qcrypto_queue_req(cp, ctx->pengine, &req->base); +} + +static int _sha_init(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + rctx->first_blk = 1; + rctx->last_blk = 0; + rctx->byte_count[0] = 0; + rctx->byte_count[1] = 0; + rctx->byte_count[2] = 0; + rctx->byte_count[3] = 0; + rctx->trailing_buf_len = 0; + rctx->count = 0; + + return 0; +} + +static int _sha1_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_stat *pstat; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + + _sha_init(req); + sha_ctx->alg = QCE_HASH_SHA1; + + memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE); + memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + pstat->sha1_digest++; + return 0; +} + +static int _sha256_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_stat *pstat; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + + _sha_init(req); + sha_ctx->alg = QCE_HASH_SHA256; + + memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE); + memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + pstat->sha256_digest++; + return 0; +} + + +static int _sha1_export(struct ahash_request *req, void *out) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *out_ctx = (struct sha1_state *)out; + + out_ctx->count = rctx->count; + _byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE); + memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE); + + return 0; +} + +static int 
_sha1_hmac_export(struct ahash_request *req, void *out) +{ + return _sha1_export(req, out); +} + +/* crypto hw padding constant for hmac first operation */ +#define HMAC_PADDING 64 + +static int __sha1_import_common(struct ahash_request *req, const void *in, + bool hmac) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *in_ctx = (struct sha1_state *)in; + u64 hw_count = in_ctx->count; + + rctx->count = in_ctx->count; + memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE); + if (in_ctx->count <= SHA1_BLOCK_SIZE) { + rctx->first_blk = 1; + } else { + rctx->first_blk = 0; + /* + * For hmac, there is a hardware padding done + * when first is set. So the byte_count will be + * incremented by 64 after the operation of first + */ + if (hmac) + hw_count += HMAC_PADDING; + } + rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0); + rctx->byte_count[1] = (uint32_t)(hw_count >> 32); + _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen); + + rctx->trailing_buf_len = (uint32_t)(in_ctx->count & + (SHA1_BLOCK_SIZE-1)); + return 0; +} + +static int _sha1_import(struct ahash_request *req, const void *in) +{ + return __sha1_import_common(req, in, false); +} + +static int _sha1_hmac_import(struct ahash_request *req, const void *in) +{ + return __sha1_import_common(req, in, true); +} + +static int _sha256_export(struct ahash_request *req, void *out) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *out_ctx = (struct sha256_state *)out; + + out_ctx->count = rctx->count; + _byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE); + memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE); + + return 0; +} + +static int _sha256_hmac_export(struct ahash_request *req, void *out) +{ + return _sha256_export(req, out); +} + +static int __sha256_import_common(struct ahash_request *req, const void *in, + bool hmac) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *in_ctx = (struct sha256_state *)in; + u64 hw_count = in_ctx->count; + + rctx->count = in_ctx->count; + memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE); + + if (in_ctx->count <= SHA256_BLOCK_SIZE) { + rctx->first_blk = 1; + } else { + rctx->first_blk = 0; + /* + * for hmac, there is a hardware padding done + * when first is set. So the byte_count will be + * incremented by 64 after the operation of first + */ + if (hmac) + hw_count += HMAC_PADDING; + } + + rctx->byte_count[0] = (uint32_t)(hw_count & 0xFFFFFFC0); + rctx->byte_count[1] = (uint32_t)(hw_count >> 32); + _words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen); + + rctx->trailing_buf_len = (uint32_t)(in_ctx->count & + (SHA256_BLOCK_SIZE-1)); + + + return 0; +} + +static int _sha256_import(struct ahash_request *req, const void *in) +{ + return __sha256_import_common(req, in, false); +} + +static int _sha256_hmac_import(struct ahash_request *req, const void *in) +{ + return __sha256_import_common(req, in, true); +} + +static int _copy_source(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *srctx = NULL; + uint32_t bytes = 0; + uint32_t num_sg = 0; + + srctx = ahash_request_ctx(req); + srctx->orig_src = req->src; + srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC); + if (srctx->data == NULL) { + pr_err("Mem Alloc fail rctx->data, err %ld for 0x%x\n", + PTR_ERR(srctx->data), (req->nbytes + 64)); + return -ENOMEM; + } + + num_sg = qcrypto_count_sg(req->src, req->nbytes); + bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data, + req->nbytes); + if (bytes != req->nbytes) + pr_warn("bytes copied=0x%x bytes to copy= 0x%x\n", bytes, + req->nbytes); + sg_set_buf(&srctx->dsg, srctx->data, + req->nbytes); + sg_mark_end(&srctx->dsg); + req->src = &srctx->dsg; + + return 0; +} + +static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + uint32_t total, len, num_sg; + struct scatterlist *sg_last; + uint8_t *k_src = NULL; + uint32_t sha_pad_len = 0; + uint32_t trailing_buf_len = 0; + uint32_t nbytes; + uint32_t offset = 0; + uint32_t bytes = 0; + uint8_t *staging; + int ret = 0; + + /* check for trailing buffer from previous updates and append it */ + total = req->nbytes + rctx->trailing_buf_len; + len = req->nbytes; + + if (total <= sha_block_size) { + k_src = &rctx->trailing_buf[rctx->trailing_buf_len]; + num_sg = qcrypto_count_sg(req->src, len); + bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len); + + rctx->trailing_buf_len = total; + return 0; + } + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf), + L1_CACHE_BYTES); + memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len); + k_src = &rctx->trailing_buf[0]; + /* get new trailing buffer */ + sha_pad_len = ALIGN(total, sha_block_size) - total; + trailing_buf_len = sha_block_size - sha_pad_len; + offset = req->nbytes - trailing_buf_len; + + if (offset != req->nbytes) + scatterwalk_map_and_copy(k_src, req->src, offset, + trailing_buf_len, 0); + + nbytes = total - trailing_buf_len; + num_sg = qcrypto_count_sg(req->src, req->nbytes); + + len = rctx->trailing_buf_len; + sg_last = req->src; + + while (len < nbytes) { + if ((len + sg_last->length) > nbytes) + break; + len += sg_last->length; + sg_last = sg_next(sg_last); + } + if (rctx->trailing_buf_len) { + if (cp->ce_support.aligned_only) { + rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC); + if (rctx->data2 == NULL) + return -ENOMEM; + memcpy(rctx->data2, staging, + rctx->trailing_buf_len); + memcpy((rctx->data2 + rctx->trailing_buf_len), + rctx->data, req->src->length); + kfree_sensitive(rctx->data); +
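+ /* data2 now holds the trailing bytes from the previous update followed by the freshly copied source; use it as the request buffer from here on */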
rctx->data = rctx->data2; + sg_set_buf(&rctx->sg[0], rctx->data, + (rctx->trailing_buf_len + + req->src->length)); + req->src = rctx->sg; + sg_mark_end(&rctx->sg[0]); + } else { + sg_mark_end(sg_last); + memset(rctx->sg, 0, sizeof(rctx->sg)); + sg_set_buf(&rctx->sg[0], staging, + rctx->trailing_buf_len); + sg_mark_end(&rctx->sg[1]); + sg_chain(rctx->sg, 2, req->src); + req->src = rctx->sg; + } + } else + sg_mark_end(sg_last); + + req->nbytes = nbytes; + rctx->trailing_buf_len = trailing_buf_len; + + ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base); + + return ret; +} + +static int _sha1_update(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + + if (cp->ce_support.aligned_only) { + if (_copy_source(req)) + return -ENOMEM; + } + rctx->count += req->nbytes; + return _sha_update(req, SHA1_BLOCK_SIZE); +} + +static int _sha256_update(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + + if (cp->ce_support.aligned_only) { + if (_copy_source(req)) + return -ENOMEM; + } + + rctx->count += req->nbytes; + return _sha_update(req, SHA256_BLOCK_SIZE); +} + +static int _sha_final(struct ahash_request *req, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + int ret = 0; + uint8_t *staging; + + if (cp->ce_support.aligned_only) { + if (_copy_source(req)) + return -ENOMEM; + } + + rctx->last_blk = 1; + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf), + L1_CACHE_BYTES); + memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len); + sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len); + sg_mark_end(&rctx->sg[0]); + + req->src = &rctx->sg[0]; + req->nbytes = rctx->trailing_buf_len; + + ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base); + + return ret; +} + +static int _sha1_final(struct ahash_request *req) +{ + return _sha_final(req, SHA1_BLOCK_SIZE); +} + +static int _sha256_final(struct ahash_request *req) +{ + return _sha_final(req, SHA256_BLOCK_SIZE); +} + +static int _sha_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + if (cp->ce_support.aligned_only) { + if (_copy_source(req)) + return -ENOMEM; + } + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + rctx->first_blk = 1; + rctx->last_blk = 1; + ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base); + + return ret; +} + +static int _sha1_digest(struct ahash_request *req) +{ + _sha1_init(req); + return _sha_digest(req); +} + +static int _sha256_digest(struct ahash_request *req) +{ + _sha256_init(req); + return _sha_digest(req); +} + +static void _crypto_sha_hmac_ahash_req_complete( + struct crypto_async_request *req, int err) +{ + struct completion *ahash_req_complete = req->data; + + if (err == -EINPROGRESS) + return; + complete(ahash_req_complete); +} + +static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + 
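+ /* A key longer than the block size is reduced by hashing it first (standard HMAC key handling per RFC 2104): run a one-shot digest of the key into authkey and wait for it to complete */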
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + uint8_t *in_buf; + int ret = 0; + struct scatterlist sg = {0}; + struct ahash_request *ahash_req; + struct completion ahash_req_complete; + + ahash_req = ahash_request_alloc(tfm, GFP_KERNEL); + if (ahash_req == NULL) + return -ENOMEM; + init_completion(&ahash_req_complete); + ahash_request_set_callback(ahash_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _crypto_sha_hmac_ahash_req_complete, + &ahash_req_complete); + crypto_ahash_clear_flags(tfm, ~0); + + in_buf = kzalloc(len + 64, GFP_KERNEL); + if (in_buf == NULL) { + ahash_request_free(ahash_req); + return -ENOMEM; + } + memcpy(in_buf, key, len); + sg_set_buf(&sg, in_buf, len); + sg_mark_end(&sg); + + ahash_request_set_crypt(ahash_req, &sg, + &sha_ctx->authkey[0], len); + + if (sha_ctx->alg == QCE_HASH_SHA1) + ret = _sha1_digest(ahash_req); + else + ret = _sha256_digest(ahash_req); + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = + wait_for_completion_interruptible( + &ahash_req_complete); + reinit_completion(&sha_ctx->ahash_req_complete); + } + + kfree_sensitive(in_buf); + ahash_request_free(ahash_req); + + return ret; +} + +static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; + + memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE); + if (len <= SHA1_BLOCK_SIZE) { + memcpy(&sha_ctx->authkey[0], key, len); + sha_ctx->authkey_in_len = len; + } else { + sha_ctx->alg = QCE_HASH_SHA1; + sha_ctx->diglen = SHA1_DIGEST_SIZE; + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA1 hmac setkey failed\n"); + sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE; + } + return ret; +} + +static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; + + memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE); + if (len <= SHA256_BLOCK_SIZE) { + memcpy(&sha_ctx->authkey[0], key, len); + sha_ctx->authkey_in_len = len; + } else { + sha_ctx->alg = QCE_HASH_SHA256; + sha_ctx->diglen = SHA256_DIGEST_SIZE; + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA256 hmac setkey failed\n"); + sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE; + } + + return ret; +} + +static int _sha_hmac_init_ihash(struct ahash_request *req, + uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + int i; + + for (i = 0; i < sha_block_size; i++) + rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36; + rctx->trailing_buf_len = sha_block_size; + + return 0; +} + +static int _sha1_hmac_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + int ret = 0; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + pstat->sha1_hmac_digest++; + + _sha_init(req); + memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE); + memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + + if (cp->ce_support.sha_hmac) + sha_ctx->alg = QCE_HASH_SHA1_HMAC; + else { + sha_ctx->alg = QCE_HASH_SHA1; + ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE); + } + + return ret; +} + +static int _sha256_hmac_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct 
crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + int ret = 0; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + pstat->sha256_hmac_digest++; + + _sha_init(req); + + memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE); + memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + + if (cp->ce_support.sha_hmac) + sha_ctx->alg = QCE_HASH_SHA256_HMAC; + else { + sha_ctx->alg = QCE_HASH_SHA256; + ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE); + } + + return ret; +} + +static int _sha1_hmac_update(struct ahash_request *req) +{ + return _sha1_update(req); +} + +static int _sha256_hmac_update(struct ahash_request *req) +{ + return _sha256_update(req); +} + +static int _sha_hmac_outer_hash(struct ahash_request *req, + uint32_t sha_digest_size, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_priv *cp = sha_ctx->cp; + int i; + uint8_t *staging; + uint8_t *p; + + staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf), + L1_CACHE_BYTES); + p = staging; + for (i = 0; i < sha_block_size; i++) + *p++ = sha_ctx->authkey[i] ^ 0x5c; + memcpy(p, &rctx->digest[0], sha_digest_size); + sg_set_buf(&rctx->sg[0], staging, sha_block_size + + sha_digest_size); + sg_mark_end(&rctx->sg[0]); + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + req->src = &rctx->sg[0]; + req->nbytes = sha_block_size + sha_digest_size; + + _sha_init(req); + if (sha_ctx->alg == QCE_HASH_SHA1) { + memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + } else { + memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + } + + rctx->last_blk = 1; + return _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base); +} + +static int _sha_hmac_inner_hash(struct ahash_request *req, + uint32_t sha_digest_size, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct ahash_request *areq = sha_ctx->ahash_req; + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + uint8_t *staging; + + staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf), + L1_CACHE_BYTES); + memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len); + sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len); + sg_mark_end(&rctx->sg[0]); + + ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0], + rctx->trailing_buf_len); + rctx->last_blk = 1; + ret = _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base); + + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = + wait_for_completion_interruptible(&sha_ctx->ahash_req_complete); + reinit_completion(&sha_ctx->ahash_req_complete); + } + + return ret; +} + +static int _sha1_hmac_final(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + if (cp->ce_support.sha_hmac) + return _sha_final(req, SHA1_BLOCK_SIZE); + ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE); + if (ret) + return ret; + return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE); +} + +static int _sha256_hmac_final(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = 
crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + if (cp->ce_support.sha_hmac) + return _sha_final(req, SHA256_BLOCK_SIZE); + + ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE); + if (ret) + return ret; + + return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE); +} + + +static int _sha1_hmac_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_stat *pstat; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + pstat->sha1_hmac_digest++; + + _sha_init(req); + memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + sha_ctx->alg = QCE_HASH_SHA1_HMAC; + + return _sha_digest(req); +} + +static int _sha256_hmac_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_stat *pstat; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + + pstat = &_qcrypto_stat; + pstat->sha256_hmac_digest++; + + _sha_init(req); + memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + sha_ctx->alg = QCE_HASH_SHA256_HMAC; + + return _sha_digest(req); +} + +static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size) +{ + char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-"; + + if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-")) + return -EINVAL; + strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME); + strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME); + return 0; +} + + +int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_engine *pengine = NULL; + + pengine = _qrypto_find_pengine_device(cp, dev); + if (pengine == NULL) + return -ENODEV; + ctx->pengine = pengine; + + return 0; +} +EXPORT_SYMBOL(qcrypto_cipher_set_device); + +int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev, + u32 hw_inst) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_engine *pengine = NULL; + + pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst); + if (pengine == NULL) + return -ENODEV; + ctx->pengine = pengine; + + return 0; +} +EXPORT_SYMBOL(qcrypto_cipher_set_device_hw); + +int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_engine *pengine = NULL; + + pengine = _qrypto_find_pengine_device(cp, dev); + if (pengine == NULL) + return -ENODEV; + ctx->pengine = pengine; + + return 0; +} +EXPORT_SYMBOL(qcrypto_aead_set_device); + +int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev) +{ + struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_engine *pengine = NULL; + + pengine = _qrypto_find_pengine_device(cp, dev); + if (pengine == NULL) + return -ENODEV; + ctx->pengine = pengine; + + return 0; +} +EXPORT_SYMBOL(qcrypto_ahash_set_device); + +int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + 
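+ /* Reject HW-key usage when the platform has no hardware key support, and refuse to combine the mutually exclusive key flags */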
struct crypto_priv *cp = ctx->cp; + + if ((flags & QCRYPTO_CTX_USE_HW_KEY) && + (!cp->platform_support.hw_key_support)) { + pr_err("%s HW key usage not supported\n", __func__); + return -EINVAL; + } + if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) == + QCRYPTO_CTX_KEY_MASK) { + pr_err("%s Cannot set all key flags\n", __func__); + return -EINVAL; + } + + ctx->flags |= flags; + return 0; +} +EXPORT_SYMBOL(qcrypto_cipher_set_flag); + +int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + + if ((flags & QCRYPTO_CTX_USE_HW_KEY) && + (!cp->platform_support.hw_key_support)) { + pr_err("%s HW key usage not supported\n", __func__); + return -EINVAL; + } + if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) == + QCRYPTO_CTX_KEY_MASK) { + pr_err("%s Cannot set all key flags\n", __func__); + return -EINVAL; + } + + ctx->flags |= flags; + return 0; +} +EXPORT_SYMBOL(qcrypto_aead_set_flag); + +int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags) +{ + struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + + if ((flags & QCRYPTO_CTX_USE_HW_KEY) && + (!cp->platform_support.hw_key_support)) { + pr_err("%s HW key usage not supported\n", __func__); + return -EINVAL; + } + if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) == + QCRYPTO_CTX_KEY_MASK) { + pr_err("%s Cannot set all key flags\n", __func__); + return -EINVAL; + } + + ctx->flags |= flags; + return 0; +} +EXPORT_SYMBOL(qcrypto_ahash_set_flag); + +int qcrypto_cipher_clear_flag(struct skcipher_request *req, + unsigned int flags) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct qcrypto_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->flags &= ~flags; + return 0; + +} +EXPORT_SYMBOL(qcrypto_cipher_clear_flag); + +int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->flags &= ~flags; + return 0; + +} +EXPORT_SYMBOL(qcrypto_aead_clear_flag); + +int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags) +{ + struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + + ctx->flags &= ~flags; + return 0; +} +EXPORT_SYMBOL(qcrypto_ahash_clear_flag); + +static struct ahash_alg _qcrypto_ahash_algos[] = { + { + .init = _sha1_init, + .update = _sha1_update, + .final = _sha1_final, + .digest = _sha1_digest, + .export = _sha1_export, + .import = _sha1_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "qcrypto-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, + { + .init = _sha256_init, + .update = _sha256_update, + .final = _sha256_final, + .digest = _sha256_digest, + .export = _sha256_export, + .import = _sha256_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "qcrypto-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = 
_qcrypto_ahash_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, +}; + +static struct ahash_alg _qcrypto_sha_hmac_algos[] = { + { + .init = _sha1_hmac_init, + .update = _sha1_hmac_update, + .final = _sha1_hmac_final, + .export = _sha1_hmac_export, + .import = _sha1_hmac_import, + .digest = _sha1_hmac_digest, + .setkey = _sha1_hmac_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "qcrypto-hmac-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_hmac_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, + { + .init = _sha256_hmac_init, + .update = _sha256_hmac_update, + .final = _sha256_hmac_final, + .export = _sha256_hmac_export, + .import = _sha256_hmac_import, + .digest = _sha256_hmac_digest, + .setkey = _sha256_hmac_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "qcrypto-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_hmac_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, +}; + +static struct skcipher_alg _qcrypto_sk_cipher_algos[] = { + { + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_ecb, + .decrypt = _qcrypto_dec_aes_ecb, + .init = _qcrypto_aes_skcipher_init, + .exit = _qcrypto_aes_skcipher_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "qcrypto-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_cbc, + .decrypt = _qcrypto_dec_aes_cbc, + .init = _qcrypto_aes_skcipher_init, + .exit = _qcrypto_aes_skcipher_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "qcrypto-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_ctr, + .decrypt = _qcrypto_dec_aes_ctr, + .init = _qcrypto_aes_skcipher_init, + .exit = _qcrypto_aes_skcipher_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "qcrypto-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_des, + .encrypt = _qcrypto_enc_des_ecb, + .decrypt = _qcrypto_dec_des_ecb, + .init = _qcrypto_skcipher_init, + .exit = _qcrypto_skcipher_exit, + .min_keysize = DES_KEY_SIZE, + .max_keysize = 
DES_KEY_SIZE, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "qcrypto-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_des, + .encrypt = _qcrypto_enc_des_cbc, + .decrypt = _qcrypto_dec_des_cbc, + .init = _qcrypto_skcipher_init, + .exit = _qcrypto_skcipher_exit, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "qcrypto-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_3des, + .encrypt = _qcrypto_enc_3des_ecb, + .decrypt = _qcrypto_dec_3des_ecb, + .init = _qcrypto_skcipher_init, + .exit = _qcrypto_skcipher_exit, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "qcrypto-ecb-3des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_setkey_3des, + .encrypt = _qcrypto_enc_3des_cbc, + .decrypt = _qcrypto_dec_3des_cbc, + .init = _qcrypto_skcipher_init, + .exit = _qcrypto_skcipher_exit, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "qcrypto-cbc-3des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, +}; + +static struct skcipher_alg _qcrypto_sk_cipher_xts_algo = { + .setkey = _qcrypto_setkey_aes_xts, + .encrypt = _qcrypto_enc_aes_xts, + .decrypt = _qcrypto_dec_aes_xts, + .init = _qcrypto_skcipher_init, + .exit = _qcrypto_skcipher_exit, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "xts(aes)", + .cra_driver_name = "qcrypto-xts-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +}; + +static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = { + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_aes_cbc, + .decrypt = _qcrypto_aead_decrypt_aes_cbc, + .init = _qcrypto_cra_aead_aes_sha1_init, + .exit = _qcrypto_cra_aead_aes_exit, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_des_cbc, + .decrypt = _qcrypto_aead_decrypt_des_cbc, + .init = _qcrypto_cra_aead_sha1_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = DES_BLOCK_SIZE, + 
.maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_3des_cbc, + .decrypt = _qcrypto_aead_decrypt_3des_cbc, + .init = _qcrypto_cra_aead_sha1_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, +}; + +static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = { + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_aes_cbc, + .decrypt = _qcrypto_aead_decrypt_aes_cbc, + .init = _qcrypto_cra_aead_aes_sha256_init, + .exit = _qcrypto_cra_aead_aes_exit, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_des_cbc, + .decrypt = _qcrypto_aead_decrypt_des_cbc, + .init = _qcrypto_cra_aead_sha256_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = DES_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, + { + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_3des_cbc, + .decrypt = _qcrypto_aead_decrypt_3des_cbc, + .init = _qcrypto_cra_aead_sha256_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = DES3_EDE_BLOCK_SIZE, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, + }, +}; + +static struct aead_alg _qcrypto_aead_ccm_algo = { + .setkey = _qcrypto_aead_ccm_setkey, + .setauthsize = _qcrypto_aead_ccm_setauthsize, + .encrypt = _qcrypto_aead_encrypt_aes_ccm, + .decrypt = _qcrypto_aead_decrypt_aes_ccm, + .init = _qcrypto_cra_aead_ccm_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ccm(aes)", + .cra_driver_name = "qcrypto-aes-ccm", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct 
qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +}; + +static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = { + .setkey = _qcrypto_aead_rfc4309_ccm_setkey, + .setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize, + .encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm, + .decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm, + .init = _qcrypto_cra_aead_rfc4309_ccm_init, + .exit = _qcrypto_cra_aead_exit, + .ivsize = 8, + .maxauthsize = 16, + .base = { + .cra_name = "rfc4309(ccm(aes))", + .cra_driver_name = "qcrypto-rfc4309-aes-ccm", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_module = THIS_MODULE, + }, +}; + +static int _qcrypto_probe(struct platform_device *pdev) +{ + int rc = 0; + void *handle; + struct crypto_priv *cp = &qcrypto_dev; + int i; + struct msm_ce_hw_support *platform_support; + struct crypto_engine *pengine; + unsigned long flags; + struct qcrypto_req_control *pqcrypto_req_control = NULL; + + pengine = kzalloc(sizeof(*pengine), GFP_KERNEL); + if (!pengine) + return -ENOMEM; + + pengine->icc_path = of_icc_get(&pdev->dev, "data_path"); + if (IS_ERR(pengine->icc_path)) { + dev_err(&pdev->dev, "failed to get icc path\n"); + rc = PTR_ERR(pengine->icc_path); + goto exit_kzfree; + } + pengine->bw_state = BUS_NO_BANDWIDTH; + + rc = icc_set_bw(pengine->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + if (rc) { + dev_err(&pdev->dev, "failed to set high bandwidth\n"); + goto exit_kzfree; + } + handle = qce_open(pdev, &rc); + if (handle == NULL) { + rc = -ENODEV; + goto exit_free_pdata; + } + rc = icc_set_bw(pengine->icc_path, 0, 0); + if (rc) { + dev_err(&pdev->dev, "failed to set low bandwidth\n"); + goto exit_qce_close; + } + + platform_set_drvdata(pdev, pengine); + pengine->qce = handle; + pengine->pcp = cp; + pengine->pdev = pdev; + pengine->signature = 0xdeadbeef; + + timer_setup(&(pengine->bw_reaper_timer), + qcrypto_bw_reaper_timer_callback, 0); + INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work); + INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work); + pengine->high_bw_req = false; + pengine->active_seq = 0; + pengine->last_active_seq = 0; + pengine->check_flag = false; + pengine->max_req_used = 0; + pengine->issue_req = false; + + crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH); + + mutex_lock(&cp->engine_lock); + cp->total_units++; + pengine->unit = cp->total_units; + + spin_lock_irqsave(&cp->lock, flags); + pengine->first_engine = list_empty(&cp->engine_list); + if (pengine->first_engine) + cp->first_engine = pengine; + list_add_tail(&pengine->elist, &cp->engine_list); + cp->next_engine = pengine; + spin_unlock_irqrestore(&cp->lock, flags); + + qce_hw_support(pengine->qce, &cp->ce_support); + pengine->ce_hw_instance = cp->ce_support.ce_hw_instance; + pengine->max_req = cp->ce_support.max_request; + pqcrypto_req_control = kcalloc(pengine->max_req, + sizeof(struct qcrypto_req_control), + GFP_KERNEL); + if (pqcrypto_req_control == NULL) { + rc = -ENOMEM; + goto exit_unlock_mutex; + } + qcrypto_init_req_control(pengine, pqcrypto_req_control); + if (cp->ce_support.bam) { + cp->platform_support.ce_shared = cp->ce_support.is_shared; + cp->platform_support.shared_ce_resource = 0; + cp->platform_support.hw_key_support = cp->ce_support.hw_key; + cp->platform_support.sha_hmac = 1; + pengine->ce_device = cp->ce_support.ce_device; + } else { + platform_support = + (struct msm_ce_hw_support *)pdev->dev.platform_data; + 
cp->platform_support.ce_shared = platform_support->ce_shared; + cp->platform_support.shared_ce_resource = + platform_support->shared_ce_resource; + cp->platform_support.hw_key_support = + platform_support->hw_key_support; + cp->platform_support.sha_hmac = platform_support->sha_hmac; + } + + if (cp->total_units != 1) + goto exit_unlock_mutex; + + /* register crypto cipher algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(_qcrypto_sk_cipher_algos); i++) { + struct qcrypto_alg *q_alg; + + q_alg = _qcrypto_cipher_alg_alloc(cp, + &_qcrypto_sk_cipher_algos[i]); + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->cipher_alg.base.cra_name, + strlen(q_alg->cipher_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->cipher_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_skcipher(&q_alg->cipher_alg); + if (rc) { + dev_err(&pdev->dev, "%s alg registration failed\n", + q_alg->cipher_alg.base.cra_driver_name); + kfree_sensitive(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->cipher_alg.base.cra_driver_name); + } + } + + /* register crypto cipher algorithms the device supports */ + if (cp->ce_support.aes_xts) { + struct qcrypto_alg *q_alg; + + q_alg = _qcrypto_cipher_alg_alloc(cp, + &_qcrypto_sk_cipher_xts_algo); + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_aes_xts_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->cipher_alg.base.cra_name, + strlen(q_alg->cipher_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->cipher_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_skcipher(&q_alg->cipher_alg); + if (rc) { + dev_err(&pdev->dev, "%s alg registration failed\n", + q_alg->cipher_alg.base.cra_driver_name); + kfree_sensitive(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->cipher_alg.base.cra_driver_name); + } + } + + /* + * Register crypto hash (sha1 and sha256) algorithms the + * device supports + */ + for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) { + struct qcrypto_alg *q_alg = NULL; + + q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]); + + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_ahash_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->sha_alg.halg.base.cra_name, + strlen(q_alg->sha_alg.halg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->sha_alg.halg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_ahash(&q_alg->sha_alg); + if (rc) { + dev_err(&pdev->dev, "%s alg registration failed\n", + q_alg->sha_alg.halg.base.cra_driver_name); + kfree_sensitive(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->sha_alg.halg.base.cra_driver_name); + } + } + + /* register crypto aead (hmac-sha1) algorithms the device supports */ + if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac + || cp->ce_support.sha_hmac) { + for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos); + i++) { + struct qcrypto_alg *q_alg; + + q_alg = _qcrypto_aead_alg_alloc(cp, + &_qcrypto_aead_sha1_hmac_algos[i]); + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if 
(cp->ce_support.use_sw_aead_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->aead_alg.base.cra_name, + strlen(q_alg->aead_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->aead_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_aead(&q_alg->aead_alg); + if (rc) { + dev_err(&pdev->dev, + "%s alg registration failed\n", + q_alg->aead_alg.base.cra_driver_name); + kfree(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->aead_alg.base.cra_driver_name); + } + } + } + + /* register crypto aead (hmac-sha256) algorithms the device supports */ + if (cp->ce_support.sha_hmac) { + for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos); + i++) { + struct qcrypto_alg *q_alg; + + q_alg = _qcrypto_aead_alg_alloc(cp, + &_qcrypto_aead_sha256_hmac_algos[i]); + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_aead_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->aead_alg.base.cra_name, + strlen(q_alg->aead_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->aead_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_aead(&q_alg->aead_alg); + if (rc) { + dev_err(&pdev->dev, + "%s alg registration failed\n", + q_alg->aead_alg.base.cra_driver_name); + kfree(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->aead_alg.base.cra_driver_name); + } + } + } + + if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) { + /* register crypto hmac algorithms the device supports */ + for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) { + struct qcrypto_alg *q_alg = NULL; + + q_alg = _qcrypto_sha_alg_alloc(cp, + &_qcrypto_sha_hmac_algos[i]); + + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_hmac_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->sha_alg.halg.base.cra_name, + strlen( + q_alg->sha_alg.halg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->sha_alg.halg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_ahash(&q_alg->sha_alg); + if (rc) { + dev_err(&pdev->dev, + "%s alg registration failed\n", + q_alg->sha_alg.halg.base.cra_driver_name); + kfree_sensitive(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->sha_alg.halg.base.cra_driver_name); + } + } + } + /* + * Register crypto cipher (aes-ccm) algorithms the + * device supports + */ + if (cp->ce_support.aes_ccm) { + struct qcrypto_alg *q_alg; + + q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo); + if (IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + if (cp->ce_support.use_sw_aes_ccm_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->aead_alg.base.cra_name, + strlen(q_alg->aead_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->aead_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_aead(&q_alg->aead_alg); + if (rc) { + dev_err(&pdev->dev, "%s alg registration failed\n", + q_alg->aead_alg.base.cra_driver_name); + kfree_sensitive(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->aead_alg.base.cra_driver_name); + } + + q_alg = _qcrypto_aead_alg_alloc(cp, + &_qcrypto_aead_rfc4309_ccm_algo); + if 
(IS_ERR(q_alg)) { + rc = PTR_ERR(q_alg); + goto err; + } + + if (cp->ce_support.use_sw_aes_ccm_algo) { + rc = _qcrypto_prefix_alg_cra_name( + q_alg->aead_alg.base.cra_name, + strlen(q_alg->aead_alg.base.cra_name)); + if (rc) { + dev_err(&pdev->dev, + "The algorithm name %s is too long.\n", + q_alg->aead_alg.base.cra_name); + kfree(q_alg); + goto err; + } + } + rc = crypto_register_aead(&q_alg->aead_alg); + if (rc) { + dev_err(&pdev->dev, "%s alg registration failed\n", + q_alg->aead_alg.base.cra_driver_name); + kfree(q_alg); + } else { + list_add_tail(&q_alg->entry, &cp->alg_list); + dev_info(&pdev->dev, "%s\n", + q_alg->aead_alg.base.cra_driver_name); + } + } + mutex_unlock(&cp->engine_lock); + + return 0; +err: + _qcrypto_remove_engine(pengine); + kfree_sensitive(pqcrypto_req_control); +exit_unlock_mutex: + mutex_unlock(&cp->engine_lock); +exit_qce_close: + if (pengine->qce) + qce_close(pengine->qce); +exit_free_pdata: + icc_set_bw(pengine->icc_path, 0, 0); + platform_set_drvdata(pdev, NULL); +exit_kzfree: + memset(pengine, 0, ksize((void *)pengine)); + kfree(pengine); + return rc; +} + +static int _qcrypto_engine_in_use(struct crypto_engine *pengine) +{ + struct crypto_priv *cp = pengine->pcp; + + if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen + || cp->req_queue.qlen) + return 1; + return 0; +} + +static void _qcrypto_do_suspending(struct crypto_engine *pengine) +{ + del_timer_sync(&pengine->bw_reaper_timer); + qcrypto_ce_set_bus(pengine, false); +} + +static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret = 0; + struct crypto_engine *pengine; + struct crypto_priv *cp; + unsigned long flags; + + pengine = platform_get_drvdata(pdev); + if (!pengine) + return -EINVAL; + + /* + * Check if this platform supports clock management in suspend/resume + * If not, just simply return 0. 
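+ * (Summary added for clarity, derived from the code below:) the engine is + * suspended only when it has no queued or in-flight requests and no pending + * bandwidth request; transitional bus states return -EBUSY so the suspend + * can be retried. When qce_pm_table.suspend is set, the bus is voted on just + * long enough to call the QCE suspend hook.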
+ */ + cp = pengine->pcp; + if (!cp->ce_support.clk_mgmt_sus_res) + return 0; + spin_lock_irqsave(&cp->lock, flags); + switch (pengine->bw_state) { + case BUS_NO_BANDWIDTH: + if (!pengine->high_bw_req) + pengine->bw_state = BUS_SUSPENDED; + else + ret = -EBUSY; + break; + case BUS_HAS_BANDWIDTH: + if (_qcrypto_engine_in_use(pengine)) { + ret = -EBUSY; + } else { + pengine->bw_state = BUS_SUSPENDING; + spin_unlock_irqrestore(&cp->lock, flags); + _qcrypto_do_suspending(pengine); + spin_lock_irqsave(&cp->lock, flags); + pengine->bw_state = BUS_SUSPENDED; + } + break; + case BUS_BANDWIDTH_RELEASING: + case BUS_BANDWIDTH_ALLOCATING: + case BUS_SUSPENDED: + case BUS_SUSPENDING: + default: + ret = -EBUSY; + break; + } + + spin_unlock_irqrestore(&cp->lock, flags); + if (ret) + return ret; + if (qce_pm_table.suspend) { + qcrypto_ce_set_bus(pengine, true); + qce_pm_table.suspend(pengine->qce); + qcrypto_ce_set_bus(pengine, false); + } + return 0; +} + +static int _qcrypto_resume(struct platform_device *pdev) +{ + struct crypto_engine *pengine; + struct crypto_priv *cp; + unsigned long flags; + int ret = 0; + + pengine = platform_get_drvdata(pdev); + + if (!pengine) + return -EINVAL; + cp = pengine->pcp; + if (!cp->ce_support.clk_mgmt_sus_res) + return 0; + spin_lock_irqsave(&cp->lock, flags); + if (pengine->bw_state == BUS_SUSPENDED) { + spin_unlock_irqrestore(&cp->lock, flags); + if (qce_pm_table.resume) { + qcrypto_ce_set_bus(pengine, true); + qce_pm_table.resume(pengine->qce); + qcrypto_ce_set_bus(pengine, false); + } + spin_lock_irqsave(&cp->lock, flags); + pengine->bw_state = BUS_NO_BANDWIDTH; + pengine->active_seq++; + pengine->check_flag = false; + if (cp->req_queue.qlen || pengine->req_queue.qlen) { + if (!pengine->high_bw_req) { + qcrypto_ce_bw_allocate_req(pengine); + pengine->high_bw_req = true; + } + } + } else + ret = -EBUSY; + + spin_unlock_irqrestore(&cp->lock, flags); + return ret; +} + +static const struct of_device_id qcrypto_match[] = { + {.compatible = "qcom,qcrypto",}, + {} +}; + +static struct platform_driver __qcrypto = { + .probe = _qcrypto_probe, + .remove = _qcrypto_remove, + .suspend = _qcrypto_suspend, + .resume = _qcrypto_resume, + .driver = { + .name = "qcrypto", + .of_match_table = qcrypto_match, + }, +}; + +static int _debug_qcrypto; + +static ssize_t _debug_stats_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int rc = -EINVAL; + int qcrypto = *((int *) file->private_data); + int len; + + len = _disp_stats(qcrypto); + + if (len <= count) + rc = simple_read_from_buffer((void __user *) buf, len, + ppos, (void *) _debug_read_buf, len); + return rc; +} + +static ssize_t _debug_stats_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long flags; + struct crypto_priv *cp = &qcrypto_dev; + struct crypto_engine *pe; + int i; + + memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat)); + spin_lock_irqsave(&cp->lock, flags); + list_for_each_entry(pe, &cp->engine_list, elist) { + pe->total_req = 0; + pe->err_req = 0; + qce_clear_driver_stats(pe->qce); + pe->max_req_used = 0; + } + cp->max_qlen = 0; + cp->resp_start = 0; + cp->resp_stop = 0; + cp->no_avail = 0; + cp->max_resp_qlen = 0; + cp->queue_work_eng3 = 0; + cp->queue_work_not_eng3 = 0; + cp->queue_work_not_eng3_nz = 0; + cp->max_reorder_cnt = 0; + for (i = 0; i < MAX_SMP_CPU + 1; i++) + cp->cpu_req[i] = 0; + spin_unlock_irqrestore(&cp->lock, flags); + return count; +} + +static const struct file_operations _debug_stats_ops = { + .open = 
simple_open, + .read = _debug_stats_read, + .write = _debug_stats_write, +}; + +static int _qcrypto_debug_init(void) +{ + int rc; + char name[DEBUG_MAX_FNAME]; + struct dentry *dent; + + _debug_dent = debugfs_create_dir("qcrypto", NULL); + if (IS_ERR(_debug_dent)) { + pr_debug("qcrypto debugfs_create_dir fail, error %ld\n", + PTR_ERR(_debug_dent)); + return PTR_ERR(_debug_dent); + } + + snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1); + _debug_qcrypto = 0; + dent = debugfs_create_file(name, 0644, _debug_dent, + &_debug_qcrypto, &_debug_stats_ops); + if (dent == NULL) { + pr_debug("qcrypto debugfs_create_file fail, error %ld\n", + PTR_ERR(dent)); + rc = PTR_ERR(dent); + goto err; + } + return 0; +err: + debugfs_remove_recursive(_debug_dent); + return rc; +} + +static int __init _qcrypto_init(void) +{ + struct crypto_priv *pcp = &qcrypto_dev; + + _qcrypto_debug_init(); + INIT_LIST_HEAD(&pcp->alg_list); + INIT_LIST_HEAD(&pcp->engine_list); + init_llist_head(&pcp->ordered_resp_list); + spin_lock_init(&pcp->lock); + mutex_init(&pcp->engine_lock); + pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq", + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); + if (!pcp->resp_wq) { + pr_err("Error allocating workqueue\n"); + return -ENOMEM; + } + INIT_WORK(&pcp->resp_work, seq_response); + pcp->total_units = 0; + pcp->next_engine = NULL; + pcp->scheduled_eng = NULL; + pcp->ce_req_proc_sts = IN_PROGRESS; + crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH); + return platform_driver_register(&__qcrypto); +} + +static void __exit _qcrypto_exit(void) +{ + pr_debug("%s Unregister QCRYPTO\n", __func__); + debugfs_remove_recursive(_debug_dent); + platform_driver_unregister(&__qcrypto); +} + +module_init(_qcrypto_init); +module_exit(_qcrypto_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QTI Crypto driver"); diff --git a/crypto-qti/qcryptohw_50.h b/crypto-qti/qcryptohw_50.h new file mode 100644 index 0000000000..16bb7d5ede --- /dev/null +++ b/crypto-qti/qcryptohw_50.h @@ -0,0 +1,521 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ +#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ + + +#define CRYPTO_BAM_CNFG_BITS_REG 0x0007C +#define CRYPTO_BAM_CD_ENABLE 27 +#define CRYPTO_BAM_CD_ENABLE_MASK (1 << CRYPTO_BAM_CD_ENABLE) + +#define QCE_AUTH_REG_BYTE_COUNT 4 +#define CRYPTO_VERSION_REG 0x1A000 + +#define CRYPTO_DATA_IN0_REG 0x1A010 +#define CRYPTO_DATA_IN1_REG 0x1A014 +#define CRYPTO_DATA_IN2_REG 0x1A018 +#define CRYPTO_DATA_IN3_REG 0x1A01C + +#define CRYPTO_DATA_OUT0_REG 0x1A020 +#define CRYPTO_DATA_OUT1_REG 0x1A024 +#define CRYPTO_DATA_OUT2_REG 0x1A028 +#define CRYPTO_DATA_OUT3_REG 0x1A02C + +#define CRYPTO_STATUS_REG 0x1A100 +#define CRYPTO_STATUS2_REG 0x1A104 +#define CRYPTO_ENGINES_AVAIL 0x1A108 +#define CRYPTO_FIFO_SIZES_REG 0x1A10C + +#define CRYPTO_SEG_SIZE_REG 0x1A110 +#define CRYPTO_GOPROC_REG 0x1A120 +#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000 +#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000 + +#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200 +#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204 +#define CRYPTO_ENCR_SEG_START_REG 0x1A208 + +#define CRYPTO_ENCR_KEY0_REG 0x1D000 +#define CRYPTO_ENCR_KEY1_REG 0x1D004 +#define CRYPTO_ENCR_KEY2_REG 0x1D008 +#define CRYPTO_ENCR_KEY3_REG 0x1D00C +#define CRYPTO_ENCR_KEY4_REG 0x1D010 +#define CRYPTO_ENCR_KEY5_REG 0x1D014 +#define CRYPTO_ENCR_KEY6_REG 0x1D018 +#define CRYPTO_ENCR_KEY7_REG 0x1D01C + +#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020 +#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024 +#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028 +#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C +#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030 +#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034 +#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038 +#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C + +#define CRYPTO_ENCR_PIPE0_KEY0_REG 0x1E000 +#define CRYPTO_ENCR_PIPE0_KEY1_REG 0x1E004 +#define CRYPTO_ENCR_PIPE0_KEY2_REG 0x1E008 +#define CRYPTO_ENCR_PIPE0_KEY3_REG 0x1E00C +#define CRYPTO_ENCR_PIPE0_KEY4_REG 0x1E010 +#define CRYPTO_ENCR_PIPE0_KEY5_REG 0x1E014 +#define CRYPTO_ENCR_PIPE0_KEY6_REG 0x1E018 +#define CRYPTO_ENCR_PIPE0_KEY7_REG 0x1E01C + +#define CRYPTO_ENCR_PIPE1_KEY0_REG 0x1E020 +#define CRYPTO_ENCR_PIPE1_KEY1_REG 0x1E024 +#define CRYPTO_ENCR_PIPE1_KEY2_REG 0x1E028 +#define CRYPTO_ENCR_PIPE1_KEY3_REG 0x1E02C +#define CRYPTO_ENCR_PIPE1_KEY4_REG 0x1E030 +#define CRYPTO_ENCR_PIPE1_KEY5_REG 0x1E034 +#define CRYPTO_ENCR_PIPE1_KEY6_REG 0x1E038 +#define CRYPTO_ENCR_PIPE1_KEY7_REG 0x1E03C + +#define CRYPTO_ENCR_PIPE2_KEY0_REG 0x1E040 +#define CRYPTO_ENCR_PIPE2_KEY1_REG 0x1E044 +#define CRYPTO_ENCR_PIPE2_KEY2_REG 0x1E048 +#define CRYPTO_ENCR_PIPE2_KEY3_REG 0x1E04C +#define CRYPTO_ENCR_PIPE2_KEY4_REG 0x1E050 +#define CRYPTO_ENCR_PIPE2_KEY5_REG 0x1E054 +#define CRYPTO_ENCR_PIPE2_KEY6_REG 0x1E058 +#define CRYPTO_ENCR_PIPE2_KEY7_REG 0x1E05C + +#define CRYPTO_ENCR_PIPE3_KEY0_REG 0x1E060 +#define CRYPTO_ENCR_PIPE3_KEY1_REG 0x1E064 +#define CRYPTO_ENCR_PIPE3_KEY2_REG 0x1E068 +#define CRYPTO_ENCR_PIPE3_KEY3_REG 0x1E06C +#define CRYPTO_ENCR_PIPE3_KEY4_REG 0x1E070 +#define CRYPTO_ENCR_PIPE3_KEY5_REG 0x1E074 +#define CRYPTO_ENCR_PIPE3_KEY6_REG 0x1E078 +#define CRYPTO_ENCR_PIPE3_KEY7_REG 0x1E07C + + +#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG 0x1E200 +#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG 0x1E204 +#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG 0x1E208 +#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG 0x1E20C +#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG 0x1E210 +#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG 0x1E214 +#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG 0x1E218 +#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG 0x1E21C + +#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG 0x1E220 
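+/* + * Layout note (added for illustration): the CRYPTO_ENCR_PIPEx_XTS_KEYn_REG + * definitions in this block follow a fixed stride, so word n of pipe p sits + * at 0x1E200 + p * 0x20 + n * 4 (p = 0..3, n = 0..7), matching the explicit + * values listed here. + */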
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG 0x1E224 +#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG 0x1E228 +#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG 0x1E22C +#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG 0x1E230 +#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG 0x1E234 +#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG 0x1E238 +#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG 0x1E23C + +#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG 0x1E240 +#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG 0x1E244 +#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG 0x1E248 +#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG 0x1E24C +#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG 0x1E250 +#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG 0x1E254 +#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG 0x1E258 +#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG 0x1E25C + +#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG 0x1E260 +#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG 0x1E264 +#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG 0x1E268 +#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG 0x1E26C +#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG 0x1E270 +#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG 0x1E274 +#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG 0x1E278 +#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG 0x1E27C + + +#define CRYPTO_CNTR0_IV0_REG 0x1A20C +#define CRYPTO_CNTR1_IV1_REG 0x1A210 +#define CRYPTO_CNTR2_IV2_REG 0x1A214 +#define CRYPTO_CNTR3_IV3_REG 0x1A218 + +#define CRYPTO_CNTR_MASK_REG0 0x1A23C +#define CRYPTO_CNTR_MASK_REG1 0x1A238 +#define CRYPTO_CNTR_MASK_REG2 0x1A234 +#define CRYPTO_CNTR_MASK_REG 0x1A21C + +#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220 +#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224 +#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228 +#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C + +#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x1A230 + +#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300 +#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304 +#define CRYPTO_AUTH_SEG_START_REG 0x1A308 + +#define CRYPTO_AUTH_KEY0_REG 0x1D040 +#define CRYPTO_AUTH_KEY1_REG 0x1D044 +#define CRYPTO_AUTH_KEY2_REG 0x1D048 +#define CRYPTO_AUTH_KEY3_REG 0x1D04C +#define CRYPTO_AUTH_KEY4_REG 0x1D050 +#define CRYPTO_AUTH_KEY5_REG 0x1D054 +#define CRYPTO_AUTH_KEY6_REG 0x1D058 +#define CRYPTO_AUTH_KEY7_REG 0x1D05C +#define CRYPTO_AUTH_KEY8_REG 0x1D060 +#define CRYPTO_AUTH_KEY9_REG 0x1D064 +#define CRYPTO_AUTH_KEY10_REG 0x1D068 +#define CRYPTO_AUTH_KEY11_REG 0x1D06C +#define CRYPTO_AUTH_KEY12_REG 0x1D070 +#define CRYPTO_AUTH_KEY13_REG 0x1D074 +#define CRYPTO_AUTH_KEY14_REG 0x1D078 +#define CRYPTO_AUTH_KEY15_REG 0x1D07C + +#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800 +#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804 +#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808 +#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C +#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810 +#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814 +#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818 +#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C +#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820 +#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824 +#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828 +#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C +#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830 +#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834 +#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838 +#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C + +#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E880 +#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E884 +#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E888 +#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E88C +#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E890 +#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E894 +#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E898 +#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E89C +#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E8A0 +#define CRYPTO_AUTH_PIPE1_KEY9_REG 
0x1E8A4 +#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E8A8 +#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E8AC +#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E8B0 +#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E8B4 +#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E8B8 +#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E8BC + +#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E900 +#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E904 +#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E908 +#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E90C +#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E910 +#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E914 +#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E918 +#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E91C +#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E920 +#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E924 +#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E928 +#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E92C +#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E930 +#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E934 +#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E938 +#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E93C + +#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E980 +#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E984 +#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E988 +#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E98C +#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E990 +#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E994 +#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E998 +#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E99C +#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E9A0 +#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E9A4 +#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E9A8 +#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E9AC +#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E9B0 +#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E9B4 +#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E9B8 +#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E9BC + + +#define CRYPTO_AUTH_IV0_REG 0x1A310 +#define CRYPTO_AUTH_IV1_REG 0x1A314 +#define CRYPTO_AUTH_IV2_REG 0x1A318 +#define CRYPTO_AUTH_IV3_REG 0x1A31C +#define CRYPTO_AUTH_IV4_REG 0x1A320 +#define CRYPTO_AUTH_IV5_REG 0x1A324 +#define CRYPTO_AUTH_IV6_REG 0x1A328 +#define CRYPTO_AUTH_IV7_REG 0x1A32C +#define CRYPTO_AUTH_IV8_REG 0x1A330 +#define CRYPTO_AUTH_IV9_REG 0x1A334 +#define CRYPTO_AUTH_IV10_REG 0x1A338 +#define CRYPTO_AUTH_IV11_REG 0x1A33C +#define CRYPTO_AUTH_IV12_REG 0x1A340 +#define CRYPTO_AUTH_IV13_REG 0x1A344 +#define CRYPTO_AUTH_IV14_REG 0x1A348 +#define CRYPTO_AUTH_IV15_REG 0x1A34C + +#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350 +#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354 +#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358 +#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C + +#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390 +#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394 +#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398 +#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C + +#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0 +#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4 +#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8 +#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC +#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0 +#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4 +#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8 +#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC + +#define CRYPTO_CONFIG_REG 0x1A400 +#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00 +#define CRYPTO_DEBUG_REG 0x1AF04 + + + +/* Register bits */ +#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF +#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */ +#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000 +#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */ +#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000 +#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */ + +/* status reg */ +#define CRYPTO_MAC_FAILED 31 +#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */ +#define CRYPTO_DOUT_SIZE_AVAIL_MASK 
(0x1F << CRYPTO_DOUT_SIZE_AVAIL) +#define CRYPTO_DIN_SIZE_AVAIL 21 /* bit 21-25 */ +#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL) +#define CRYPTO_HSD_ERR 20 +#define CRYPTO_ACCESS_VIOL 19 +#define CRYPTO_PIPE_ACTIVE_ERR 18 +#define CRYPTO_CFG_CHNG_ERR 17 +#define CRYPTO_DOUT_ERR 16 +#define CRYPTO_DIN_ERR 15 +#define CRYPTO_AXI_ERR 14 +#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */ +#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE) +#define CRYPTO_ENCR_BUSY 9 +#define CRYPTO_AUTH_BUSY 8 +#define CRYPTO_DOUT_INTR 7 +#define CRYPTO_DIN_INTR 6 +#define CRYPTO_OP_DONE_INTR 5 +#define CRYPTO_ERR_INTR 4 +#define CRYPTO_DOUT_RDY 3 +#define CRYPTO_DIN_RDY 2 +#define CRYPTO_OPERATION_DONE 1 +#define CRYPTO_SW_ERR 0 + +/* status2 reg */ +#define CRYPTO_AXI_EXTRA 1 +#define CRYPTO_LOCKED 2 + +/* config reg */ +#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */ +#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE) +#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0 +#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1 +#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2 +#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3 +#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4 +#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5 +#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6 +#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7 +#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8 +#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9 +#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10 +#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11 +#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12 +#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13 +#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14 +#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15 + +#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */ +#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ) +#define CRYPTO_ENUM_1_QUEUED_REQS 0 +#define CRYPTO_ENUM_2_QUEUED_REQS 1 +#define CRYPTO_ENUM_3_QUEUED_REQS 2 + +#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */ +#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES) + +#define CRYPTO_LITTLE_ENDIAN_MODE 9 +#define CRYPTO_LITTLE_ENDIAN_MASK (1 << CRYPTO_LITTLE_ENDIAN_MODE) +#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */ +#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT) + +#define CRYPTO_HIGH_SPD_EN_N 4 + +#define CRYPTO_MASK_DOUT_INTR 3 +#define CRYPTO_MASK_DIN_INTR 2 +#define CRYPTO_MASK_OP_DONE_INTR 1 +#define CRYPTO_MASK_ERR_INTR 0 + +/* auth_seg_cfg reg */ +#define CRYPTO_COMP_EXP_MAC 24 +#define CRYPTO_COMP_EXP_MAC_DISABLED 0 +#define CRYPTO_COMP_EXP_MAC_ENABLED 1 + +#define CRYPTO_F9_DIRECTION 23 +#define CRYPTO_F9_DIRECTION_UPLINK 0 +#define CRYPTO_F9_DIRECTION_DOWNLINK 1 + +#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */ +#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \ + (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS) + +#define CRYPTO_USE_PIPE_KEY_AUTH 19 +#define CRYPTO_USE_HW_KEY_AUTH 18 +#define CRYPTO_FIRST 17 +#define CRYPTO_LAST 16 + +#define CRYPTO_AUTH_POS 14 /* bit 15 .. 14*/ +#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS) +#define CRYPTO_AUTH_POS_BEFORE 0 +#define CRYPTO_AUTH_POS_AFTER 1 + +#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 
9*/ +#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE) +#define CRYPTO_AUTH_SIZE_SHA1 0 +#define CRYPTO_AUTH_SIZE_SHA256 1 +#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0 +#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1 +#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2 +#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3 +#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4 +#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5 +#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6 +#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7 +#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8 +#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9 +#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10 +#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11 +#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12 +#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13 +#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14 +#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15 + + +#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/ +#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE) +#define CRYPTO_AUTH_MODE_HASH 0 +#define CRYPTO_AUTH_MODE_HMAC 1 +#define CRYPTO_AUTH_MODE_CCM 0 +#define CRYPTO_AUTH_MODE_CMAC 1 + +#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/ +#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE) +#define CRYPTO_AUTH_KEY_SZ_AES128 0 +#define CRYPTO_AUTH_KEY_SZ_AES256 2 + +#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/ +#define CRYPTO_AUTH_ALG_MASK 7 +#define CRYPTO_AUTH_ALG_NONE 0 +#define CRYPTO_AUTH_ALG_SHA 1 +#define CRYPTO_AUTH_ALG_AES 2 +#define CRYPTO_AUTH_ALG_KASUMI 3 +#define CRYPTO_AUTH_ALG_SNOW3G 4 +#define CRYPTO_AUTH_ALG_ZUC 5 + +/* encr_xts_du_size reg */ +#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */ +#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff + +/* encr_seg_cfg reg */ +#define CRYPTO_F8_KEYSTREAM_ENABLE 17/* bit */ +#define CRYPTO_F8_KEYSTREAM_DISABLED 0 +#define CRYPTO_F8_KEYSTREAM_ENABLED 1 + +#define CRYPTO_F8_DIRECTION 16 /* bit */ +#define CRYPTO_F8_DIRECTION_UPLINK 0 +#define CRYPTO_F8_DIRECTION_DOWNLINK 1 + + +#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit */ +#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1 +#define CRYPTO_USE_KEY_REGISTERS 0 + + +#define CRYPTO_USE_HW_KEY_ENCR 14 +#define CRYPTO_USE_KEY_REG 0 +#define CRYPTO_USE_HW_KEY 1 + +#define CRYPTO_LAST_CCM 13 +#define CRYPTO_LAST_CCM_XFR 1 +#define CRYPTO_INTERM_CCM_XFR 0 + + +#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */ +#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG) +#define CRYPTO_CNTR_ALG_NIST 0 + +#define CRYPTO_ENCODE 10 + +#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */ +#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE) +/* only valid when AES */ +#define CRYPTO_ENCR_MODE_ECB 0 +#define CRYPTO_ENCR_MODE_CBC 1 +#define CRYPTO_ENCR_MODE_CTR 2 +#define CRYPTO_ENCR_MODE_XTS 3 +#define CRYPTO_ENCR_MODE_CCM 4 + +#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */ +#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ) +#define CRYPTO_ENCR_KEY_SZ_DES 0 +#define CRYPTO_ENCR_KEY_SZ_3DES 1 +#define CRYPTO_ENCR_KEY_SZ_AES128 0 +#define CRYPTO_ENCR_KEY_SZ_AES256 2 + +#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */ +#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG) +#define CRYPTO_ENCR_ALG_NONE 0 +#define CRYPTO_ENCR_ALG_DES 1 +#define CRYPTO_ENCR_ALG_AES 2 +#define CRYPTO_ENCR_ALG_KASUMI 4 +#define CRYPTO_ENCR_ALG_SNOW_3G 5 +#define CRYPTO_ENCR_ALG_ZUC 6 + +/* goproc reg */ +#define CRYPTO_GO 0 +#define CRYPTO_CLR_CNTXT 1 +#define CRYPTO_RESULTS_DUMP 2 + +/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */ +#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */ +#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \ + (0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT) + +#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 
0 /* bit 4 - 0 */ +#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \ + (0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER) + +/* F9 definition of CRYPTO_AUTH_IV4 REG */ +#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */ +#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \ + (0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS) + +/* engines_avail */ +#define CRYPTO_ENCR_AES_SEL 0 +#define CRYPTO_DES_SEL 1 +#define CRYPTO_ENCR_SNOW3G_SEL 2 +#define CRYPTO_ENCR_KASUMI_SEL 3 +#define CRYPTO_SHA_SEL 4 +#define CRYPTO_SHA512_SEL 5 +#define CRYPTO_AUTH_AES_SEL 6 +#define CRYPTO_AUTH_SNOW3G_SEL 7 +#define CRYPTO_AUTH_KASUMI_SEL 8 +#define CRYPTO_BAM_PIPE_SETS 9 /* bit 12 - 9 */ +#define CRYPTO_AXI_WR_BEATS 13 /* bit 18 - 13 */ +#define CRYPTO_AXI_RD_BEATS 19 /* bit 24 - 19 */ +#define CRYPTO_ENCR_ZUC_SEL 26 +#define CRYPTO_AUTH_ZUC_SEL 27 +#define CRYPTO_ZUC_ENABLE 28 +#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */ diff --git a/linux/platform_data/qcom_crypto_device.h b/linux/platform_data/qcom_crypto_device.h new file mode 100644 index 0000000000..819df7c5e5 --- /dev/null +++ b/linux/platform_data/qcom_crypto_device.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __QCOM_CRYPTO_DEVICE__H +#define __QCOM_CRYPTO_DEVICE__H + +#include + +struct msm_ce_hw_support { + uint32_t ce_shared; + uint32_t shared_ce_resource; + uint32_t hw_key_support; + uint32_t sha_hmac; +}; + +#endif /* __QCOM_CRYPTO_DEVICE__H */ diff --git a/linux/qcedev.h b/linux/qcedev.h new file mode 100644 index 0000000000..6968e92c4b --- /dev/null +++ b/linux/qcedev.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _QCEDEV__H +#define _QCEDEV__H + +#include +#include +#include "fips_status.h" + +#define QCEDEV_MAX_SHA_BLOCK_SIZE 64 +#define QCEDEV_MAX_BEARER 31 +#define QCEDEV_MAX_KEY_SIZE 64 +#define QCEDEV_MAX_IV_SIZE 32 + +#define QCEDEV_MAX_BUFFERS 16 +#define QCEDEV_MAX_SHA_DIGEST 32 + +#define QCEDEV_USE_PMEM 1 +#define QCEDEV_NO_PMEM 0 + +#define QCEDEV_AES_KEY_128 16 +#define QCEDEV_AES_KEY_192 24 +#define QCEDEV_AES_KEY_256 32 +/** + *qcedev_oper_enum: Operation types + * @QCEDEV_OPER_ENC: Encrypt + * @QCEDEV_OPER_DEC: Decrypt + * @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by + * user. Key already set by an external processor. + * @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by + * user. Key already set by an external processor. 
+ */ +enum qcedev_oper_enum { + QCEDEV_OPER_DEC = 0, + QCEDEV_OPER_ENC = 1, + QCEDEV_OPER_DEC_NO_KEY = 2, + QCEDEV_OPER_ENC_NO_KEY = 3, + QCEDEV_OPER_LAST +}; + +/** + *qcedev_oper_enum: Cipher algorithm types + * @QCEDEV_ALG_DES: DES + * @QCEDEV_ALG_3DES: 3DES + * @QCEDEV_ALG_AES: AES + */ +enum qcedev_cipher_alg_enum { + QCEDEV_ALG_DES = 0, + QCEDEV_ALG_3DES = 1, + QCEDEV_ALG_AES = 2, + QCEDEV_ALG_LAST +}; + +/** + *qcedev_cipher_mode_enum : AES mode + * @QCEDEV_AES_MODE_CBC: CBC + * @QCEDEV_AES_MODE_ECB: ECB + * @QCEDEV_AES_MODE_CTR: CTR + * @QCEDEV_AES_MODE_XTS: XTS + * @QCEDEV_AES_MODE_CCM: CCM + * @QCEDEV_DES_MODE_CBC: CBC + * @QCEDEV_DES_MODE_ECB: ECB + */ +enum qcedev_cipher_mode_enum { + QCEDEV_AES_MODE_CBC = 0, + QCEDEV_AES_MODE_ECB = 1, + QCEDEV_AES_MODE_CTR = 2, + QCEDEV_AES_MODE_XTS = 3, + QCEDEV_AES_MODE_CCM = 4, + QCEDEV_DES_MODE_CBC = 5, + QCEDEV_DES_MODE_ECB = 6, + QCEDEV_AES_DES_MODE_LAST +}; + +/** + *enum qcedev_sha_alg_enum : Secure Hashing Algorithm + * @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits) + * @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit) + * @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits) + * @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit) + * @QCEDEV_ALG_AES_CMAC: Configurable MAC size + */ +enum qcedev_sha_alg_enum { + QCEDEV_ALG_SHA1 = 0, + QCEDEV_ALG_SHA256 = 1, + QCEDEV_ALG_SHA1_HMAC = 2, + QCEDEV_ALG_SHA256_HMAC = 3, + QCEDEV_ALG_AES_CMAC = 4, + QCEDEV_ALG_SHA_ALG_LAST +}; + +/** + * struct buf_info - Buffer information + * @offset: Offset from the base address of the buffer + * (Used when buffer is allocated using PMEM) + * @vaddr: Virtual buffer address pointer + * @len: Size of the buffer + */ +struct buf_info { + union { + __u32 offset; + __u8 *vaddr; + }; + __u32 len; +}; + +/** + * struct qcedev_vbuf_info - Source and destination Buffer information + * @src: Array of buf_info for input/source + * @dst: Array of buf_info for output/destination + */ +struct qcedev_vbuf_info { + struct buf_info src[QCEDEV_MAX_BUFFERS]; + struct buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_pmem_info - Stores PMEM buffer information + * @fd_src: Handle to /dev/adsp_pmem used to allocate + * memory for input/src buffer + * @src: Array of buf_info for input/source + * @fd_dst: Handle to /dev/adsp_pmem used to allocate + * memory for output/dst buffer + * @dst: Array of buf_info for output/destination + * @pmem_src_offset: The offset from input/src buffer + * (allocated by PMEM) + */ +struct qcedev_pmem_info { + int fd_src; + struct buf_info src[QCEDEV_MAX_BUFFERS]; + int fd_dst; + struct buf_info dst[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_cipher_op_req - Holds the ciphering request information + * @use_pmem (IN): Flag to indicate if buffer source is PMEM + * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM + * @pmem (IN): Stores PMEM buffer information. + * Refer struct qcedev_pmem_info + * @vbuf (IN/OUT): Stores Source and destination Buffer information + * Refer to struct qcedev_vbuf_info + * @data_len (IN): Total Length of input/src and output/dst in bytes + * @in_place_op (IN): Indicates whether the operation is inplace where + * source == destination + * When using PMEM allocated memory, must set this to 1 + * @enckey (IN): 128 bits of confidentiality key + * enckey[0] bit 127-120, enckey[1] bit 119-112,.. 
+ * enckey[15] bit 7-0 + * @encklen (IN): Length of the encryption key (set to 128 bits/16 + * bytes in the driver) + * @iv (IN/OUT): Initialisation vector data + * This is updated by the driver, incremented by + * number of blocks encrypted/decrypted. + * @ivlen (IN): Length of the IV + * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set + * for AES-128 CTR mode only) + * @alg (IN): Type of ciphering algorithm: AES/DES/3DES + * @mode (IN): Mode to use when using AES algorithm: ECB/CBC/CTR + * Applicable when using AES algorithm only + * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or + * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY + * + * If use_pmem is set to 0, the driver assumes that memory was not allocated + * via PMEM, and the kernel will need to allocate memory and copy data from user + * space buffer (data_src/data_dst) and process accordingly and copy data back + * to the user space buffer + * + * If use_pmem is set to 1, the driver assumes that memory was allocated via + * PMEM. + * The kernel driver will use the fd_src to determine the kernel virtual address + * base that maps to the user space virtual address base for the buffer + * allocated in user space. + * The final input/src and output/dst buffer pointer will be determined + * by adding the offsets to the kernel virtual addr. + * + * If use of hardware key is supported in the target, user can configure the + * key parameters (encklen, enckey) to use the hardware key. + * In order to use the hardware key, set encklen to 0 and set the enckey + * data array to 0. + */ +struct qcedev_cipher_op_req { + __u8 use_pmem; + union { + struct qcedev_pmem_info pmem; + struct qcedev_vbuf_info vbuf; + }; + __u32 entries; + __u32 data_len; + __u8 in_place_op; + __u8 enckey[QCEDEV_MAX_KEY_SIZE]; + __u32 encklen; + __u8 iv[QCEDEV_MAX_IV_SIZE]; + __u32 ivlen; + __u32 byteoffset; + enum qcedev_cipher_alg_enum alg; + enum qcedev_cipher_mode_enum mode; + enum qcedev_oper_enum op; +}; + +/** + * struct qcedev_sha_op_req - Holds the hashing request information + * @data (IN): Array of pointers to the data to be hashed + * @entries (IN): Number of buf_info entries in the data array + * @data_len (IN): Length of data to be hashed + * @digest (IN/OUT): Returns the hashed data information + * @diglen (OUT): Size of the hashed/digest data + * @authkey (IN): Pointer to authentication key for HMAC + * @authklen (IN): Size of the authentication key + * @alg (IN): Secure Hash algorithm + */ +struct qcedev_sha_op_req { + struct buf_info data[QCEDEV_MAX_BUFFERS]; + __u32 entries; + __u32 data_len; + __u8 digest[QCEDEV_MAX_SHA_DIGEST]; + __u32 diglen; + __u8 *authkey; + __u32 authklen; + enum qcedev_sha_alg_enum alg; +}; + +/** + * struct qfips_verify_t - Holds data for FIPS Integrity test + * @kernel_size (IN): Size of kernel Image + * @kernel (IN): pointer to buffer containing the kernel Image + */ +struct qfips_verify_t { + unsigned int kernel_size; + void *kernel; +}; + +/** + * struct qcedev_map_buf_req - Holds the mapping request information + * fd (IN): Array of fds. + * num_fds (IN): Number of fds in fd[]. + * fd_size (IN): Array of sizes corresponding to each fd in fd[]. + * fd_offset (IN): Array of offsets corresponding to each fd in fd[]. + * buf_vaddr (OUT): Array of mapped virtual addresses corresponding to + * each fd in fd[]. 
+ */ +struct qcedev_map_buf_req { + __s32 fd[QCEDEV_MAX_BUFFERS]; + __u32 num_fds; + __u32 fd_size[QCEDEV_MAX_BUFFERS]; + __u32 fd_offset[QCEDEV_MAX_BUFFERS]; + __u64 buf_vaddr[QCEDEV_MAX_BUFFERS]; +}; + +/** + * struct qcedev_unmap_buf_req - Holds the hashing request information + * fd (IN): Array of fds to unmap + * num_fds (IN): Number of fds in fd[]. + */ +struct qcedev_unmap_buf_req { + __s32 fd[QCEDEV_MAX_BUFFERS]; + __u32 num_fds; +}; + +struct file; + +#define QCEDEV_IOC_MAGIC 0x87 + +#define QCEDEV_IOCTL_ENC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req) +#define QCEDEV_IOCTL_DEC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req) +#define QCEDEV_IOCTL_SHA_INIT_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_SHA_UPDATE_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_SHA_FINAL_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_GET_SHA_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_LOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 7) +#define QCEDEV_IOCTL_UNLOCK_CE \ + _IO(QCEDEV_IOC_MAGIC, 8) +#define QCEDEV_IOCTL_GET_CMAC_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req) +#define QCEDEV_IOCTL_MAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req) +#define QCEDEV_IOCTL_UNMAP_BUF_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req) +#endif /* _QCEDEV__H */ diff --git a/linux/qcrypto.h b/linux/qcrypto.h new file mode 100644 index 0000000000..4c034a9c1e --- /dev/null +++ b/linux/qcrypto.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ +#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ + +#include +#include +#include +#include + +#define QCRYPTO_CTX_KEY_MASK 0x000000ff +#define QCRYPTO_CTX_USE_HW_KEY 0x00000001 +#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002 + +#define QCRYPTO_CTX_XTS_MASK 0x0000ff00 +#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100 +#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200 + + +int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev); +int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev); +int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev); + +int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags); +int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags); +int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags); + +int qcrypto_cipher_clear_flag(struct skcipher_request *req, + unsigned int flags); +int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags); +int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags); + +struct crypto_engine_entry { + u32 hw_instance; + u32 ce_device; + int shared; +}; + +int qcrypto_get_num_engines(void); +void qcrypto_get_engine_list(size_t num_engines, + struct crypto_engine_entry *arr); +int qcrypto_cipher_set_device_hw(struct skcipher_request *req, + unsigned int fde_pfe, + unsigned int hw_inst); + + +struct qcrypto_func_set { + int (*cipher_set)(struct skcipher_request *req, + unsigned int fde_pfe, + unsigned int hw_inst); + int (*cipher_flag)(struct skcipher_request *req, unsigned int flags); + int (*get_num_engines)(void); + void (*get_engine_list)(size_t num_engines, + struct crypto_engine_entry *arr); +}; + +#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H */ diff --git 
a/linux/smcinvoke.h b/linux/smcinvoke.h new file mode 100644 index 0000000000..3364975450 --- /dev/null +++ b/linux/smcinvoke.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ +#ifndef _UAPI_SMCINVOKE_H_ +#define _UAPI_SMCINVOKE_H_ + +#include +#include + +#define SMCINVOKE_USERSPACE_OBJ_NULL -1 + +struct smcinvoke_buf { + __u64 addr; + __u64 size; +}; + +struct smcinvoke_obj { + __s64 fd; + __s32 cb_server_fd; + __s32 reserved; +}; + +union smcinvoke_arg { + struct smcinvoke_buf b; + struct smcinvoke_obj o; +}; + +/* + * struct smcinvoke_cmd_req: This structure is transparently sent to TEE + * @op - Operation to be performed + * @counts - number of aruments passed + * @result - result of invoke operation + * @argsize - size of each of arguments + * @args - args is pointer to buffer having all arguments + */ +struct smcinvoke_cmd_req { + __u32 op; + __u32 counts; + __s32 result; + __u32 argsize; + __u64 args; +}; + +/* + * struct smcinvoke_accept: structure to process CB req from TEE + * @has_resp: IN: Whether IOCTL is carrying response data + * @txn_id: OUT: An id that should be passed as it is for response + * @result: IN: Outcome of operation op + * @cbobj_id: OUT: Callback object which is target of operation op + * @op: OUT: Operation to be performed on target object + * @counts: OUT: Number of arguments, embedded in buffer pointed by + * buf_addr, to complete operation + * @reserved: IN/OUT: Usage is not defined but should be set to 0. + * @argsize: IN: Size of any argument, all of equal size, embedded + * in buffer pointed by buf_addr + * @buf_len: IN: Len of buffer pointed by buf_addr + * @buf_addr: IN: Buffer containing all arguments which are needed + * to complete operation op + */ +struct smcinvoke_accept { + __u32 has_resp; + __u32 txn_id; + __s32 result; + __s32 cbobj_id; + __u32 op; + __u32 counts; + __s32 reserved; + __u32 argsize; + __u64 buf_len; + __u64 buf_addr; +}; + +/* + * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client + */ +struct smcinvoke_server { + __u32 cb_buf_size; +}; + +#define SMCINVOKE_IOC_MAGIC 0x98 + +#define SMCINVOKE_IOCTL_INVOKE_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req) + +#define SMCINVOKE_IOCTL_ACCEPT_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept) + +#define SMCINVOKE_IOCTL_SERVER_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server) + +#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 4, __s32) + +#endif /* _UAPI_SMCINVOKE_H_ */ diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk new file mode 100644 index 0000000000..3673f60204 --- /dev/null +++ b/securemsm_kernel_product_board.mk @@ -0,0 +1,8 @@ +#Build ssg kernel driver +PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ + $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ + + diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk new file mode 100644 index 0000000000..4c8f5f1e9d --- /dev/null +++ b/securemsm_kernel_vendor_board.mk @@ -0,0 +1,5 @@ +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ + $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ diff --git 
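/*
 * Editorial sketch, not part of this patch: a minimal userspace call into the
 * smcinvoke UAPI declared above. The object fd, op code, and packed_counts
 * value are placeholders supplied by the caller; counts packing follows the
 * conventions of smcinvoke_object.h, which is not reproduced here.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include "smcinvoke.h"	/* the UAPI header added above (linux/smcinvoke.h) */

int invoke_example(int obj_fd, uint32_t op, uint32_t packed_counts)
{
	char in_buf[16] = {0};
	union smcinvoke_arg args[1];
	struct smcinvoke_cmd_req req = {0};

	/* Single input buffer argument; addr/size describe caller memory. */
	args[0].b.addr = (uint64_t)(uintptr_t)in_buf;
	args[0].b.size = sizeof(in_buf);

	req.op = op;			/* operation id understood by the target object */
	req.counts = packed_counts;	/* packed per smcinvoke_object.h conventions */
	req.argsize = sizeof(union smcinvoke_arg);
	req.args = (uint64_t)(uintptr_t)args;

	/* obj_fd is an fd previously obtained for a remote (TZ-hosted) object. */
	if (ioctl(obj_fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req) < 0)
		return -1;

	return req.result;	/* 0 on success, a nonzero Object error otherwise */
}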
a/smcinvoke/IClientEnv.h b/smcinvoke/IClientEnv.h new file mode 100644 index 0000000000..1ad17971f2 --- /dev/null +++ b/smcinvoke/IClientEnv.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021 The Linux Foundation. All rights reserved. + */ + +#define IClientEnv_OP_open 0 +#define IClientEnv_OP_registerLegacy 1 +#define IClientEnv_OP_register 2 +#define IClientEnv_OP_registerWithWhitelist 3 + +static inline int32_t +IClientEnv_release(struct Object self) +{ + return Object_invoke(self, Object_OP_release, 0, 0); +} + +static inline int32_t +IClientEnv_retain(struct Object self) +{ + return Object_invoke(self, Object_OP_retain, 0, 0); +} + +static inline int32_t +IClientEnv_open(struct Object self, uint32_t uid_val, struct Object *obj_ptr) +{ + union ObjectArg a[2]; + int32_t result; + + a[0].b = (struct ObjectBuf) { &uid_val, sizeof(uint32_t) }; + + result = Object_invoke(self, IClientEnv_OP_open, a, ObjectCounts_pack(1, 0, 0, 1)); + + *obj_ptr = a[1].o; + + return result; +} + +static inline int32_t +IClientEnv_registerLegacy(struct Object self, const void *credentials_ptr, size_t credentials_len, + struct Object *clientEnv_ptr) +{ + union ObjectArg a[2]; + int32_t result; + + a[0].bi = (struct ObjectBufIn) { credentials_ptr, credentials_len * 1 }; + + result = Object_invoke(self, IClientEnv_OP_registerLegacy, a, + ObjectCounts_pack(1, 0, 0, 1)); + + *clientEnv_ptr = a[1].o; + + return result; +} + +static inline int32_t +IClientEnv_register(struct Object self, struct Object credentials_val, + struct Object *clientEnv_ptr) +{ + union ObjectArg a[2]; + int32_t result; + + a[0].o = credentials_val; + + result = Object_invoke(self, IClientEnv_OP_register, a, + ObjectCounts_pack(0, 0, 1, 1)); + + *clientEnv_ptr = a[1].o; + + return result; +} + +static inline int32_t +IClientEnv_registerWithWhitelist(struct Object self, + struct Object credentials_val, const uint32_t *uids_ptr, + size_t uids_len, struct Object *clientEnv_ptr) +{ + union ObjectArg a[3]; + int32_t result; + + a[1].o = credentials_val; + a[0].bi = (struct ObjectBufIn) { uids_ptr, uids_len * + sizeof(uint32_t) }; + + result = Object_invoke(self, IClientEnv_OP_registerWithWhitelist, a, + ObjectCounts_pack(1, 0, 1, 1)); + + *clientEnv_ptr = a[2].o; + + return result; +} + diff --git a/smcinvoke/IQSEEComCompat.h b/smcinvoke/IQSEEComCompat.h new file mode 100644 index 0000000000..5c42583023 --- /dev/null +++ b/smcinvoke/IQSEEComCompat.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021 The Linux Foundation. All rights reserved. 
+ */ + +#include "smcinvoke_object.h" + +#define IQSEEComCompat_ERROR_APP_UNAVAILABLE INT32_C(10) +#define IQSEEComCompat_OP_sendRequest 0 +#define IQSEEComCompat_OP_disconnect 1 +#define IQSEEComCompat_OP_unload 2 + + +static inline int32_t +IQSEEComCompat_release(struct Object self) +{ + return Object_invoke(self, Object_OP_release, 0, 0); +} + +static inline int32_t +IQSEEComCompat_retain(struct Object self) +{ + return Object_invoke(self, Object_OP_retain, 0, 0); +} + +static inline int32_t +IQSEEComCompat_sendRequest(struct Object self, + const void *reqIn_ptr, size_t reqIn_len, + const void *rspIn_ptr, size_t rspIn_len, + void *reqOut_ptr, size_t reqOut_len, size_t *reqOut_lenout, + void *rspOut_ptr, size_t rspOut_len, size_t *rspOut_lenout, + const uint32_t *embeddedBufOffsets_ptr, + size_t embeddedBufOffsets_len, uint32_t is64_val, + struct Object smo1_val, struct Object smo2_val, + struct Object smo3_val, struct Object smo4_val) +{ + union ObjectArg a[10]; + int32_t result; + + a[0].bi = (struct ObjectBufIn) { reqIn_ptr, reqIn_len * 1 }; + a[1].bi = (struct ObjectBufIn) { rspIn_ptr, rspIn_len * 1 }; + a[4].b = (struct ObjectBuf) { reqOut_ptr, reqOut_len * 1 }; + a[5].b = (struct ObjectBuf) { rspOut_ptr, rspOut_len * 1 }; + a[2].bi = (struct ObjectBufIn) { embeddedBufOffsets_ptr, + embeddedBufOffsets_len * sizeof(uint32_t) }; + a[3].b = (struct ObjectBuf) { &is64_val, sizeof(uint32_t) }; + a[6].o = smo1_val; + a[7].o = smo2_val; + a[8].o = smo3_val; + a[9].o = smo4_val; + + result = Object_invoke(self, IQSEEComCompat_OP_sendRequest, a, + ObjectCounts_pack(4, 2, 4, 0)); + + *reqOut_lenout = a[4].b.size / 1; + *rspOut_lenout = a[5].b.size / 1; + + return result; +} + +static inline int32_t +IQSEEComCompat_disconnect(struct Object self) +{ + return Object_invoke(self, IQSEEComCompat_OP_disconnect, 0, 0); +} + +static inline int32_t +IQSEEComCompat_unload(struct Object self) +{ + return Object_invoke(self, IQSEEComCompat_OP_unload, 0, 0); +} diff --git a/smcinvoke/IQSEEComCompatAppLoader.h b/smcinvoke/IQSEEComCompatAppLoader.h new file mode 100644 index 0000000000..9bc390049b --- /dev/null +++ b/smcinvoke/IQSEEComCompatAppLoader.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2021 The Linux Foundation. All rights reserved. 
+ */ + +#include "smcinvoke_object.h" + +#define IQSEEComCompatAppLoader_ERROR_INVALID_BUFFER INT32_C(10) +#define IQSEEComCompatAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11) +#define IQSEEComCompatAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12) +#define IQSEEComCompatAppLoader_ERROR_METADATA_INVALID INT32_C(13) +#define IQSEEComCompatAppLoader_ERROR_MAX_NUM_APPS INT32_C(14) +#define IQSEEComCompatAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15) +#define IQSEEComCompatAppLoader_ERROR_ALREADY_LOADED INT32_C(16) +#define IQSEEComCompatAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17) +#define IQSEEComCompatAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18) +#define IQSEEComCompatAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19) +#define IQSEEComCompatAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20) +#define IQSEEComCompatAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21) +#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(22) +#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(23) +#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(24) + +#define IQSEEComCompatAppLoader_OP_loadFromRegion 0 +#define IQSEEComCompatAppLoader_OP_loadFromBuffer 1 +#define IQSEEComCompatAppLoader_OP_lookupTA 2 + + +static inline int32_t +IQSEEComCompatAppLoader_release(struct Object self) +{ + return Object_invoke(self, Object_OP_release, 0, 0); +} + +static inline int32_t +IQSEEComCompatAppLoader_retain(struct Object self) +{ + return Object_invoke(self, Object_OP_retain, 0, 0); +} + +static inline int32_t +IQSEEComCompatAppLoader_loadFromRegion(struct Object self, + struct Object appElf_val, const void *filename_ptr, + size_t filename_len, struct Object *appCompat_ptr) +{ + union ObjectArg a[3]; + int32_t result; + + a[1].o = appElf_val; + a[0].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 }; + + result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromRegion, a, + ObjectCounts_pack(1, 0, 1, 1)); + + *appCompat_ptr = a[2].o; + + return result; +} + +static inline int32_t +IQSEEComCompatAppLoader_loadFromBuffer(struct Object self, + const void *appElf_ptr, size_t appElf_len, + const void *filename_ptr, size_t filename_len, + void *distName_ptr, size_t distName_len, + size_t *distName_lenout, struct Object *appCompat_ptr) +{ + union ObjectArg a[4]; + int32_t result; + + a[0].bi = (struct ObjectBufIn) { appElf_ptr, appElf_len * 1 }; + a[1].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 }; + a[2].b = (struct ObjectBuf) { distName_ptr, distName_len * 1 }; + + result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromBuffer, + a, ObjectCounts_pack(2, 1, 0, 1)); + + *distName_lenout = a[2].b.size / 1; + *appCompat_ptr = a[3].o; + + return result; +} + +static inline int32_t +IQSEEComCompatAppLoader_lookupTA(struct Object self, const void *appName_ptr, + size_t appName_len, struct Object *appCompat_ptr) +{ + union ObjectArg a[2]; + int32_t result; + + a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 }; + + result = Object_invoke(self, IQSEEComCompatAppLoader_OP_lookupTA, + a, ObjectCounts_pack(1, 0, 0, 1)); + + *appCompat_ptr = a[1].o; + + return result; +} + diff --git a/smcinvoke/misc/qseecom_kernel.h b/smcinvoke/misc/qseecom_kernel.h new file mode 100644 index 0000000000..2c0ffeca76 --- /dev/null +++ b/smcinvoke/misc/qseecom_kernel.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __QSEECOM_KERNEL_H_ +#define __QSEECOM_KERNEL_H_ + +#include + + +#define QSEECOM_ALIGN_SIZE 0x40 +#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1) +#define QSEECOM_ALIGN(x) \ + ((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK)) + +/* + * struct qseecom_handle - + * Handle to the qseecom device for kernel clients + * @sbuf - shared buffer pointer + * @sbbuf_len - shared buffer size + */ +struct qseecom_handle { + void *dev; /* in/out */ + unsigned char *sbuf; /* in/out */ + uint32_t sbuf_len; /* in/out */ +}; + +int qseecom_start_app(struct qseecom_handle **handle, + char *app_name, uint32_t size); +int qseecom_shutdown_app(struct qseecom_handle **handle); +int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, + uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len); + +int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high); +#if IS_ENABLED(CONFIG_QSEECOM) +int qseecom_process_listener_from_smcinvoke(uint32_t *result, + u64 *response_type, unsigned int *data); +#else +static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result, + u64 *response_type, unsigned int *data) +{ + return -EOPNOTSUPP; +} +#endif + + +#endif /* __QSEECOM_KERNEL_H_ */ diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c new file mode 100644 index 0000000000..4f2972e063 --- /dev/null +++ b/smcinvoke/smcinvoke.c @@ -0,0 +1,2449 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smcinvoke.h" +#include "smcinvoke_object.h" +#include "misc/qseecom_kernel.h" + +#define CREATE_TRACE_POINTS +#include "trace_smcinvoke.h" + +#define SMCINVOKE_DEV "smcinvoke" +#define SMCINVOKE_TZ_ROOT_OBJ 1 +#define SMCINVOKE_TZ_OBJ_NULL 0 +#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096 +#define SMCINVOKE_ARGS_ALIGN_SIZE (sizeof(uint64_t)) +#define SMCINVOKE_NEXT_AVAILABLE_TXN 0 +#define SMCINVOKE_REQ_PLACED 1 +#define SMCINVOKE_REQ_PROCESSING 2 +#define SMCINVOKE_REQ_PROCESSED 3 +#define SMCINVOKE_INCREMENT 1 +#define SMCINVOKE_DECREMENT 0 +#define SMCINVOKE_OBJ_TYPE_TZ_OBJ 0 +#define SMCINVOKE_OBJ_TYPE_SERVER 1 +#define SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL 2 +#define SMCINVOKE_MEM_MAP_OBJ 0 +#define SMCINVOKE_MEM_RGN_OBJ 1 +#define SMCINVOKE_MEM_PERM_RW 6 +#define SMCINVOKE_SCM_EBUSY_WAIT_MS 30 +#define SMCINVOKE_SCM_EBUSY_MAX_RETRY 67 + + +/* TZ defined values - Start */ +#define SMCINVOKE_INVOKE_PARAM_ID 0x224 +#define SMCINVOKE_CB_RSP_PARAM_ID 0x22 +#define SMCINVOKE_INVOKE_CMD_LEGACY 0x32000600 +#define SMCINVOKE_INVOKE_CMD 0x32000602 +#define SMCINVOKE_CB_RSP_CMD 0x32000601 +#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3 +/* TZ defined values - End */ + +/* + * This is the state when server FD has been closed but + * TZ still has refs of CBOBjs served by this server + */ +#define SMCINVOKE_SERVER_STATE_DEFUNCT 1 + +#define CBOBJ_MAX_RETRIES 5 +#define FOR_ARGS(ndxvar, counts, section) \ + for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \ + ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \ + + OBJECT_COUNTS_NUM_##section(counts)); \ + ++ndxvar) + +#define TZCB_BUF_OFFSET(tzcb_req) (sizeof(tzcb_req->result) + \ + sizeof(struct smcinvoke_msg_hdr) + \ + sizeof(union smcinvoke_tz_args) * \ + 
OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts)) + +/* + * +ve uhandle : either remote obj or mem obj, decided by f_ops + * -ve uhandle : either Obj NULL or CBObj + * - -1: OBJ NULL + * - < -1: CBObj + */ +#define UHANDLE_IS_FD(h) ((h) >= 0) +#define UHANDLE_IS_NULL(h) ((h) == SMCINVOKE_USERSPACE_OBJ_NULL) +#define UHANDLE_IS_CB_OBJ(h) (h < SMCINVOKE_USERSPACE_OBJ_NULL) +#define UHANDLE_NULL (SMCINVOKE_USERSPACE_OBJ_NULL) +/* + * MAKE => create handle for other domain i.e. TZ or userspace + * GET => retrieve obj from incoming handle + */ +#define UHANDLE_GET_CB_OBJ(h) (-2-(h)) +#define UHANDLE_MAKE_CB_OBJ(o) (-2-(o)) +#define UHANDLE_GET_FD(h) (h) + +/* + * +ve tzhandle : remote object i.e. owned by TZ + * -ve tzhandle : local object i.e. owned by linux + * -------------------------------------------------- + *| 1 (1 bit) | Obj Id (15 bits) | srvr id (16 bits) | + * --------------------------------------------------- + * Server ids are defined below for various local objects + * server id 0 : Kernel Obj + * server id 1 : Memory region Obj + * server id 2 : Memory map Obj + * server id 3-15: Reserverd + * server id 16 & up: Callback Objs + */ +#define KRNL_SRVR_ID 0 +#define MEM_RGN_SRVR_ID 1 +#define MEM_MAP_SRVR_ID 2 +#define CBOBJ_SERVER_ID_START 0x10 +#define CBOBJ_SERVER_ID_END ((1<<16) - 1) +/* local obj id is represented by 15 bits */ +#define MAX_LOCAL_OBJ_ID ((1<<15) - 1) +/* CBOBJs will be served by server id 0x10 onwards */ +#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF)) +#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF) +#define TZHANDLE_MAKE_LOCAL(s, o) (((0x8000 | (o)) << 16) | s) + +#define TZHANDLE_IS_NULL(h) ((h) == SMCINVOKE_TZ_OBJ_NULL) +#define TZHANDLE_IS_LOCAL(h) ((h) & 0x80000000) +#define TZHANDLE_IS_REMOTE(h) (!TZHANDLE_IS_NULL(h) && !TZHANDLE_IS_LOCAL(h)) + +#define TZHANDLE_IS_KERNEL_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == KRNL_SRVR_ID) +#define TZHANDLE_IS_MEM_RGN_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID) +#define TZHANDLE_IS_MEM_MAP_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) == MEM_MAP_SRVR_ID) +#define TZHANDLE_IS_MEM_OBJ(h) (TZHANDLE_IS_MEM_RGN_OBJ(h) || \ + TZHANDLE_IS_MEM_MAP_OBJ(h)) +#define TZHANDLE_IS_CB_OBJ(h) (TZHANDLE_IS_LOCAL(h) && \ + TZHANDLE_GET_SERVER(h) >= CBOBJ_SERVER_ID_START) + +#define FILE_IS_REMOTE_OBJ(f) ((f)->f_op && (f)->f_op == &g_smcinvoke_fops) + +static DEFINE_MUTEX(g_smcinvoke_lock); +#define NO_LOCK 0 +#define TAKE_LOCK 1 +#define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); } +#define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); } +static DEFINE_HASHTABLE(g_cb_servers, 8); +static LIST_HEAD(g_mem_objs); +static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START; +static uint16_t g_last_mem_rgn_id, g_last_mem_map_obj_id; +static size_t g_max_cb_buf_size = SMCINVOKE_TZ_MIN_BUF_SIZE; +static unsigned int cb_reqs_inflight; +static bool legacy_smc_call; +static int invoke_cmd; + +static long smcinvoke_ioctl(struct file *, unsigned int, unsigned long); +static int smcinvoke_open(struct inode *, struct file *); +static int smcinvoke_release(struct inode *, struct file *); +static int release_cb_server(uint16_t); + +static const struct file_operations g_smcinvoke_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = smcinvoke_ioctl, + .compat_ioctl = smcinvoke_ioctl, + .open = smcinvoke_open, + .release = smcinvoke_release, +}; + +static dev_t smcinvoke_device_no; +static struct cdev smcinvoke_cdev; +static struct class *driver_class; 
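/*
 * Illustrative self-check of the tzhandle encoding above (editorial sketch,
 * not used by the driver): server id MEM_RGN_SRVR_ID (1) and object id 5
 * pack into 0x80050001, and the GET macros recover both fields.
 */
static inline bool tzhandle_layout_example(void)
{
	uint32_t h = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, 5);

	return TZHANDLE_IS_LOCAL(h) &&
		TZHANDLE_GET_SERVER(h) == MEM_RGN_SRVR_ID &&
		TZHANDLE_GET_OBJID(h) == 5;
}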
+static struct device *class_dev; +static struct platform_device *smcinvoke_pdev; + +struct smcinvoke_buf_hdr { + uint32_t offset; + uint32_t size; +}; + +union smcinvoke_tz_args { + struct smcinvoke_buf_hdr b; + int32_t handle; +}; + +struct smcinvoke_msg_hdr { + uint32_t tzhandle; + uint32_t op; + uint32_t counts; +}; + +/* Inbound reqs from TZ */ +struct smcinvoke_tzcb_req { + int32_t result; + struct smcinvoke_msg_hdr hdr; + union smcinvoke_tz_args args[0]; +}; + +struct smcinvoke_file_data { + uint32_t context_type; + union { + uint32_t tzhandle; + uint16_t server_id; + }; +}; + +struct smcinvoke_piggyback_msg { + uint32_t version; + uint32_t op; + uint32_t counts; + int32_t objs[0]; +}; + +/* Data structure to hold request coming from TZ */ +struct smcinvoke_cb_txn { + uint32_t txn_id; + int32_t state; + struct smcinvoke_tzcb_req *cb_req; + size_t cb_req_bytes; + struct file **filp_to_release; + struct hlist_node hash; + struct kref ref_cnt; +}; + +struct smcinvoke_server_info { + uint16_t server_id; + uint16_t state; + uint32_t txn_id; + struct kref ref_cnt; + wait_queue_head_t req_wait_q; + wait_queue_head_t rsp_wait_q; + size_t cb_buf_size; + DECLARE_HASHTABLE(reqs_table, 4); + DECLARE_HASHTABLE(responses_table, 4); + struct hlist_node hash; + struct list_head pending_cbobjs; +}; + +struct smcinvoke_cbobj { + uint16_t cbobj_id; + struct kref ref_cnt; + struct smcinvoke_server_info *server; + struct list_head list; +}; + +/* + * We require couple of objects, one for mem region & another + * for mapped mem_obj once mem region has been mapped. It is + * possible that TZ can release either independent of other. + */ +struct smcinvoke_mem_obj { + /* these ids are objid part of tzhandle */ + uint16_t mem_region_id; + uint16_t mem_map_obj_id; + struct dma_buf *dma_buf; + struct dma_buf_attachment *buf_attach; + struct sg_table *sgt; + struct kref mem_regn_ref_cnt; + struct kref mem_map_obj_ref_cnt; + uint64_t p_addr; + size_t p_addr_len; + struct list_head list; + bool bridge_created_by_others; + uint64_t shmbridge_handle; +}; + +static void destroy_cb_server(struct kref *kref) +{ + struct smcinvoke_server_info *server = container_of(kref, + struct smcinvoke_server_info, ref_cnt); + if (server) { + hash_del(&server->hash); + kfree(server); + } +} + +/* + * A separate find func is reqd mainly for couple of cases: + * next_cb_server_id_locked which checks if server id had been utilized or not. + * - It would be overhead if we do ref_cnt for this case + * smcinvoke_release: which is called when server is closed from userspace. 
+ * - During server creation we init ref count, now put it back + */ +static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id) +{ + struct smcinvoke_server_info *data = NULL; + + hash_for_each_possible(g_cb_servers, data, hash, server_id) { + if (data->server_id == server_id) + return data; + } + return NULL; +} + +static struct smcinvoke_server_info *get_cb_server_locked(uint16_t server_id) +{ + struct smcinvoke_server_info *server = find_cb_server_locked(server_id); + + if (server) + kref_get(&server->ref_cnt); + + return server; +} + +static uint16_t next_cb_server_id_locked(void) +{ + if (g_last_cb_server_id == CBOBJ_SERVER_ID_END) + g_last_cb_server_id = CBOBJ_SERVER_ID_START; + + while (find_cb_server_locked(++g_last_cb_server_id)) + ; + + return g_last_cb_server_id; +} + +static inline void release_filp(struct file **filp_to_release, size_t arr_len) +{ + size_t i = 0; + + for (i = 0; i < arr_len; i++) { + if (filp_to_release[i]) { + fput(filp_to_release[i]); + filp_to_release[i] = NULL; + } + } +} + +static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id, + bool is_mem_rgn_obj) +{ + struct smcinvoke_mem_obj *mem_obj = NULL; + + if (list_empty(&g_mem_objs)) + return NULL; + + list_for_each_entry(mem_obj, &g_mem_objs, list) { + if ((is_mem_rgn_obj && + (mem_obj->mem_region_id == mem_obj_id)) || + (!is_mem_rgn_obj && + (mem_obj->mem_map_obj_id == mem_obj_id))) + return mem_obj; + } + return NULL; +} + +static uint32_t next_mem_region_obj_id_locked(void) +{ + if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID) + g_last_mem_rgn_id = 0; + + while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ)) + ; + + return g_last_mem_rgn_id; +} + +static uint32_t next_mem_map_obj_id_locked(void) +{ + if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID) + g_last_mem_map_obj_id = 0; + + while (find_mem_obj_locked(++g_last_mem_map_obj_id, + SMCINVOKE_MEM_MAP_OBJ)) + ; + + return g_last_mem_map_obj_id; +} + +static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) +{ + list_del(&mem_obj->list); + dma_buf_put(mem_obj->dma_buf); + if (!mem_obj->bridge_created_by_others) + qtee_shmbridge_deregister(mem_obj->shmbridge_handle); + kfree(mem_obj); +} + +static void del_mem_regn_obj_locked(struct kref *kref) +{ + struct smcinvoke_mem_obj *mem_obj = container_of(kref, + struct smcinvoke_mem_obj, mem_regn_ref_cnt); + + /* + * mem_regn obj and mem_map obj are held into mem_obj structure which + * can't be released until both kinds of objs have been released. + * So check whether mem_map iobj has ref 0 and only then release mem_obj + */ + if (kref_read(&mem_obj->mem_map_obj_ref_cnt) == 0) + free_mem_obj_locked(mem_obj); +} + +static void del_mem_map_obj_locked(struct kref *kref) +{ + struct smcinvoke_mem_obj *mem_obj = container_of(kref, + struct smcinvoke_mem_obj, mem_map_obj_ref_cnt); + + mem_obj->p_addr_len = 0; + mem_obj->p_addr = 0; + if (mem_obj->sgt) + dma_buf_unmap_attachment(mem_obj->buf_attach, + mem_obj->sgt, DMA_BIDIRECTIONAL); + if (mem_obj->buf_attach) + dma_buf_detach(mem_obj->dma_buf, mem_obj->buf_attach); + + /* + * mem_regn obj and mem_map obj are held into mem_obj structure which + * can't be released until both kinds of objs have been released. 
+ * So check if mem_regn obj has ref 0 and only then release mem_obj + */ + if (kref_read(&mem_obj->mem_regn_ref_cnt) == 0) + free_mem_obj_locked(mem_obj); +} + +static int release_mem_obj_locked(int32_t tzhandle) +{ + int is_mem_regn_obj = TZHANDLE_IS_MEM_RGN_OBJ(tzhandle); + struct smcinvoke_mem_obj *mem_obj = find_mem_obj_locked( + TZHANDLE_GET_OBJID(tzhandle), is_mem_regn_obj); + + if (!mem_obj) { + pr_err("memory object not found\n"); + return OBJECT_ERROR_BADOBJ; + } + + if (is_mem_regn_obj) + kref_put(&mem_obj->mem_regn_ref_cnt, del_mem_regn_obj_locked); + else + kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); + + return OBJECT_OK; +} + +static void free_pending_cbobj_locked(struct kref *kref) +{ + struct smcinvoke_server_info *server = NULL; + struct smcinvoke_cbobj *obj = container_of(kref, + struct smcinvoke_cbobj, ref_cnt); + list_del(&obj->list); + server = obj->server; + kfree(obj); + if (server) + kref_put(&server->ref_cnt, destroy_cb_server); +} + +static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id) +{ + int ret = 0; + bool release_server = true; + struct list_head *head = NULL; + struct smcinvoke_cbobj *cbobj = NULL; + struct smcinvoke_cbobj *obj = NULL; + struct smcinvoke_server_info *server = get_cb_server_locked(srvr_id); + + if (!server) { + pr_err("%s, server id : %u not found\n", __func__, srvr_id); + return OBJECT_ERROR_BADOBJ; + } + + head = &server->pending_cbobjs; + list_for_each_entry(cbobj, head, list) + if (cbobj->cbobj_id == obj_id) { + kref_get(&cbobj->ref_cnt); + goto out; + } + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) { + ret = OBJECT_ERROR_KMEM; + goto out; + } + + obj->cbobj_id = obj_id; + kref_init(&obj->ref_cnt); + obj->server = server; + /* + * we are holding server ref in cbobj; we will + * release server ref when cbobj is destroyed + */ + release_server = false; + list_add_tail(&obj->list, head); +out: + if (release_server) + kref_put(&server->ref_cnt, destroy_cb_server); + return ret; +} + +static int put_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id) +{ + int ret = -EINVAL; + struct smcinvoke_server_info *srvr_info = + get_cb_server_locked(srvr_id); + struct list_head *head = NULL; + struct smcinvoke_cbobj *cbobj = NULL; + + if (!srvr_info) { + pr_err("%s, server id : %u not found\n", __func__, srvr_id); + return ret; + } + + trace_put_pending_cbobj_locked(srvr_id, obj_id); + + head = &srvr_info->pending_cbobjs; + list_for_each_entry(cbobj, head, list) + if (cbobj->cbobj_id == obj_id) { + kref_put(&cbobj->ref_cnt, free_pending_cbobj_locked); + ret = 0; + break; + } + kref_put(&srvr_info->ref_cnt, destroy_cb_server); + return ret; +} + +static int release_tzhandle_locked(int32_t tzhandle) +{ + if (TZHANDLE_IS_MEM_OBJ(tzhandle)) + return release_mem_obj_locked(tzhandle); + else if (TZHANDLE_IS_CB_OBJ(tzhandle)) + return put_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle), + TZHANDLE_GET_OBJID(tzhandle)); + return OBJECT_ERROR; +} + +static void release_tzhandles(const int32_t *tzhandles, size_t len) +{ + size_t i; + + mutex_lock(&g_smcinvoke_lock); + for (i = 0; i < len; i++) + release_tzhandle_locked(tzhandles[i]); + mutex_unlock(&g_smcinvoke_lock); +} + +static void delete_cb_txn(struct kref *kref) +{ + struct smcinvoke_cb_txn *cb_txn = container_of(kref, + struct smcinvoke_cb_txn, ref_cnt); + + if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE) + release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle); + + kfree(cb_txn->cb_req); + hash_del(&cb_txn->hash); + kfree(cb_txn); 
+} + +static struct smcinvoke_cb_txn *find_cbtxn_locked( + struct smcinvoke_server_info *server, + uint32_t txn_id, int32_t state) +{ + int i = 0; + struct smcinvoke_cb_txn *cb_txn = NULL; + + /* + * Since HASH_BITS() does not work on pointers, we can't select hash + * table using state and loop over it. + */ + if (state == SMCINVOKE_REQ_PLACED) { + /* pick up 1st req */ + hash_for_each(server->reqs_table, i, cb_txn, hash) { + kref_get(&cb_txn->ref_cnt); + hash_del(&cb_txn->hash); + return cb_txn; + } + } else if (state == SMCINVOKE_REQ_PROCESSING) { + hash_for_each_possible( + server->responses_table, cb_txn, hash, txn_id) { + if (cb_txn->txn_id == txn_id) { + kref_get(&cb_txn->ref_cnt); + hash_del(&cb_txn->hash); + return cb_txn; + } + } + } + return NULL; +} + +/* + * size_add saturates at SIZE_MAX. If integer overflow is detected, + * this function would return SIZE_MAX otherwise normal a+b is returned. + */ +static inline size_t size_add(size_t a, size_t b) +{ + return (b > (SIZE_MAX - a)) ? SIZE_MAX : a + b; +} + +/* + * pad_size is used along with size_align to define a buffer overflow + * protected version of ALIGN + */ +static inline size_t pad_size(size_t a, size_t b) +{ + return (~a + 1) % b; +} + +/* + * size_align saturates at SIZE_MAX. If integer overflow is detected, this + * function would return SIZE_MAX otherwise next aligned size is returned. + */ +static inline size_t size_align(size_t a, size_t b) +{ + return size_add(a, pad_size(a, b)); +} + +static uint16_t get_server_id(int cb_server_fd) +{ + uint16_t server_id = 0; + struct smcinvoke_file_data *svr_cxt = NULL; + struct file *tmp_filp = fget(cb_server_fd); + + if (!tmp_filp) + return server_id; + + svr_cxt = tmp_filp->private_data; + if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER) + server_id = svr_cxt->server_id; + + if (tmp_filp) + fput(tmp_filp); + + return server_id; +} + +static bool is_dma_fd(int32_t uhandle, struct dma_buf **dma_buf) +{ + *dma_buf = dma_buf_get(uhandle); + return IS_ERR_OR_NULL(*dma_buf) ? false : true; +} + +static bool is_remote_obj(int32_t uhandle, struct smcinvoke_file_data **tzobj, + struct file **filp) +{ + bool ret = false; + struct file *tmp_filp = fget(uhandle); + + if (!tmp_filp) + return ret; + + if (FILE_IS_REMOTE_OBJ(tmp_filp)) { + *tzobj = tmp_filp->private_data; + if ((*tzobj)->context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + *filp = tmp_filp; + tmp_filp = NULL; + ret = true; + } + } + + if (tmp_filp) + fput(tmp_filp); + return ret; +} + +static int create_mem_obj(struct dma_buf *dma_buf, int32_t *mem_obj) +{ + struct smcinvoke_mem_obj *t_mem_obj = + kzalloc(sizeof(*t_mem_obj), GFP_KERNEL); + + if (!t_mem_obj) { + dma_buf_put(dma_buf); + return -ENOMEM; + } + + kref_init(&t_mem_obj->mem_regn_ref_cnt); + t_mem_obj->dma_buf = dma_buf; + mutex_lock(&g_smcinvoke_lock); + t_mem_obj->mem_region_id = next_mem_region_obj_id_locked(); + list_add_tail(&t_mem_obj->list, &g_mem_objs); + mutex_unlock(&g_smcinvoke_lock); + *mem_obj = TZHANDLE_MAKE_LOCAL(MEM_RGN_SRVR_ID, + t_mem_obj->mem_region_id); + return 0; +} + +/* + * This function retrieves file pointer corresponding to FD provided. It stores + * retrieved file pointer until IOCTL call is concluded. Once call is completed, + * all stored file pointers are released. file pointers are stored to prevent + * other threads from releasing that FD while IOCTL is in progress. 
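 *
 * Illustrative caller pattern (editorial sketch; variable names are
 * placeholders - the real callers collect the pinned file pointers into an
 * array and release them once the ioctl completes):
 *
 *	struct file *pinned = NULL;
 *	uint32_t tzhandle = 0;
 *	int ret = get_tzhandle_from_uhandle(uhandle, cb_server_fd,
 *					    &pinned, &tzhandle);
 *	... place tzhandle in the outgoing invoke request ...
 *	release_filp(&pinned, 1);
 *
 * release_filp() drops the pinned reference after the call has concluded.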
+ */ +static int get_tzhandle_from_uhandle(int32_t uhandle, int32_t server_fd, + struct file **filp, uint32_t *tzhandle) +{ + int ret = -EBADF; + uint16_t server_id = 0; + + if (UHANDLE_IS_NULL(uhandle)) { + *tzhandle = SMCINVOKE_TZ_OBJ_NULL; + ret = 0; + } else if (UHANDLE_IS_CB_OBJ(uhandle)) { + server_id = get_server_id(server_fd); + if (server_id < CBOBJ_SERVER_ID_START) + goto out; + + mutex_lock(&g_smcinvoke_lock); + ret = get_pending_cbobj_locked(server_id, + UHANDLE_GET_CB_OBJ(uhandle)); + mutex_unlock(&g_smcinvoke_lock); + if (ret) + goto out; + *tzhandle = TZHANDLE_MAKE_LOCAL(server_id, + UHANDLE_GET_CB_OBJ(uhandle)); + ret = 0; + } else if (UHANDLE_IS_FD(uhandle)) { + struct dma_buf *dma_buf = NULL; + struct smcinvoke_file_data *tzobj = NULL; + + if (is_dma_fd(UHANDLE_GET_FD(uhandle), &dma_buf)) { + ret = create_mem_obj(dma_buf, tzhandle); + } else if (is_remote_obj(UHANDLE_GET_FD(uhandle), + &tzobj, filp)) { + *tzhandle = tzobj->tzhandle; + ret = 0; + } + } +out: + return ret; +} + +static int get_fd_for_obj(uint32_t obj_type, uint32_t obj, int32_t *fd) +{ + int unused_fd = -1, ret = -EINVAL; + struct file *f = NULL; + struct smcinvoke_file_data *cxt = NULL; + + cxt = kzalloc(sizeof(*cxt), GFP_KERNEL); + if (!cxt) { + ret = -ENOMEM; + goto out; + } + if (obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ || + obj_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) { + cxt->context_type = obj_type; + cxt->tzhandle = obj; + } else if (obj_type == SMCINVOKE_OBJ_TYPE_SERVER) { + cxt->context_type = SMCINVOKE_OBJ_TYPE_SERVER; + cxt->server_id = obj; + } else { + goto out; + } + + unused_fd = get_unused_fd_flags(O_RDWR); + if (unused_fd < 0) + goto out; + + if (fd == NULL) + goto out; + + f = anon_inode_getfile(SMCINVOKE_DEV, &g_smcinvoke_fops, cxt, O_RDWR); + if (IS_ERR(f)) + goto out; + + *fd = unused_fd; + fd_install(*fd, f); + return 0; +out: + if (unused_fd >= 0) + put_unused_fd(unused_fd); + kfree(cxt); + + return ret; +} + +static int get_uhandle_from_tzhandle(int32_t tzhandle, int32_t srvr_id, + int32_t *uhandle, bool lock, uint32_t context_type) +{ + int ret = -1; + + if (TZHANDLE_IS_NULL(tzhandle)) { + *uhandle = UHANDLE_NULL; + ret = 0; + } else if (TZHANDLE_IS_CB_OBJ(tzhandle)) { + if (srvr_id != TZHANDLE_GET_SERVER(tzhandle)) + goto out; + *uhandle = UHANDLE_MAKE_CB_OBJ(TZHANDLE_GET_OBJID(tzhandle)); + MUTEX_LOCK(lock) + ret = get_pending_cbobj_locked(TZHANDLE_GET_SERVER(tzhandle), + TZHANDLE_GET_OBJID(tzhandle)); + MUTEX_UNLOCK(lock) + } else if (TZHANDLE_IS_MEM_RGN_OBJ(tzhandle)) { + struct smcinvoke_mem_obj *mem_obj = NULL; + + MUTEX_LOCK(lock) + mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(tzhandle), + SMCINVOKE_MEM_RGN_OBJ); + + if (mem_obj != NULL) { + int fd; + + fd = dma_buf_fd(mem_obj->dma_buf, O_CLOEXEC); + + if (fd < 0) + goto exit_lock; + *uhandle = fd; + ret = 0; + } +exit_lock: + MUTEX_UNLOCK(lock) + } else if (TZHANDLE_IS_REMOTE(tzhandle)) { + /* if execution comes here => tzhandle is an unsigned int */ + ret = get_fd_for_obj(context_type, + (uint32_t)tzhandle, uhandle); + } +out: + return ret; +} + +static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj) +{ + int ret = 0; + int tz_perm = PERM_READ|PERM_WRITE; + uint32_t *vmid_list; + uint32_t *perms_list; + uint32_t nelems = 0; + struct dma_buf *dmabuf = mem_obj->dma_buf; + phys_addr_t phys = mem_obj->p_addr; + size_t size = mem_obj->p_addr_len; + + if (!qtee_shmbridge_is_enabled()) + return 0; + + ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list, + (int **)&perms_list, (int *)&nelems); + if 
(ret) { + pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret); + return ret; + } + + if (mem_buf_dma_buf_exclusive_owner(dmabuf)) + perms_list[0] = PERM_READ | PERM_WRITE; + + ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems, + tz_perm, &mem_obj->shmbridge_handle); + + if (ret && ret != -EEXIST) { + pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n", + mem_obj->mem_region_id, ret); + goto exit; + } + + if (ret == -EEXIST) { + mem_obj->bridge_created_by_others = true; + ret = 0; + } + + trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id); +exit: + kfree(perms_list); + kfree(vmid_list); + return ret; +} + +static int32_t smcinvoke_release_mem_obj_locked(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *msg = buf; + + if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 0, 0, 0)) { + pr_err("Invalid object count in %s\n", __func__); + return OBJECT_ERROR_INVALID; + } + + trace_release_mem_obj_locked(msg->hdr.tzhandle, buf_len); + + return release_tzhandle_locked(msg->hdr.tzhandle); +} + +static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len) +{ + int ret = OBJECT_OK; + struct smcinvoke_tzcb_req *msg = buf; + struct { + uint64_t p_addr; + uint64_t len; + uint32_t perms; + } *ob = NULL; + int32_t *oo = NULL; + struct smcinvoke_mem_obj *mem_obj = NULL; + struct dma_buf_attachment *buf_attach = NULL; + struct sg_table *sgt = NULL; + + if (msg->hdr.counts != OBJECT_COUNTS_PACK(0, 1, 1, 1) || + (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) { + pr_err("Invalid counts received for mapping mem obj\n"); + return OBJECT_ERROR_INVALID; + } + /* args[0] = BO, args[1] = OI, args[2] = OO */ + ob = buf + msg->args[0].b.offset; + oo = &msg->args[2].handle; + + mutex_lock(&g_smcinvoke_lock); + mem_obj = find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle), + SMCINVOKE_MEM_RGN_OBJ); + if (!mem_obj) { + mutex_unlock(&g_smcinvoke_lock); + pr_err("Memory object not found\n"); + return OBJECT_ERROR_BADOBJ; + } + + if (!mem_obj->p_addr) { + kref_init(&mem_obj->mem_map_obj_ref_cnt); + buf_attach = dma_buf_attach(mem_obj->dma_buf, + &smcinvoke_pdev->dev); + if (IS_ERR(buf_attach)) { + ret = OBJECT_ERROR_KMEM; + pr_err("dma buf attach failed, ret: %d\n", ret); + goto out; + } + mem_obj->buf_attach = buf_attach; + + sgt = dma_buf_map_attachment(buf_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) { + pr_err("mapping dma buffers failed, ret: %d\n", + PTR_ERR(sgt)); + ret = OBJECT_ERROR_KMEM; + goto out; + } + mem_obj->sgt = sgt; + + /* contiguous only => nents=1 */ + if (sgt->nents != 1) { + ret = OBJECT_ERROR_INVALID; + pr_err("sg enries are not contigous, ret: %d\n", ret); + goto out; + } + mem_obj->p_addr = sg_dma_address(sgt->sgl); + mem_obj->p_addr_len = sgt->sgl->length; + if (!mem_obj->p_addr) { + ret = OBJECT_ERROR_INVALID; + pr_err("invalid physical address, ret: %d\n", ret); + goto out; + } + ret = smcinvoke_create_bridge(mem_obj); + if (ret) { + ret = OBJECT_ERROR_INVALID; + goto out; + } + mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked(); + } else { + kref_get(&mem_obj->mem_map_obj_ref_cnt); + } + ob->p_addr = mem_obj->p_addr; + ob->len = mem_obj->p_addr_len; + ob->perms = SMCINVOKE_MEM_PERM_RW; + *oo = TZHANDLE_MAKE_LOCAL(MEM_MAP_SRVR_ID, mem_obj->mem_map_obj_id); +out: + if (ret != OBJECT_OK) + kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); + mutex_unlock(&g_smcinvoke_lock); + return ret; +} + +static void process_kernel_obj(void *buf, size_t buf_len) +{ + struct 
smcinvoke_tzcb_req *cb_req = buf; + + switch (cb_req->hdr.op) { + case OBJECT_OP_MAP_REGION: + cb_req->result = smcinvoke_map_mem_region(buf, buf_len); + break; + case OBJECT_OP_YIELD: + cb_req->result = OBJECT_OK; + break; + default: + pr_err(" invalid operation for tz kernel object\n"); + cb_req->result = OBJECT_ERROR_INVALID; + break; + } +} + +static void process_mem_obj(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *cb_req = buf; + + mutex_lock(&g_smcinvoke_lock); + cb_req->result = (cb_req->hdr.op == OBJECT_OP_RELEASE) ? + smcinvoke_release_mem_obj_locked(buf, buf_len) : + OBJECT_ERROR_INVALID; + mutex_unlock(&g_smcinvoke_lock); +} + +static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len, + uint8_t *out_buf, phys_addr_t out_paddr, + size_t out_buf_len, int32_t *result, u64 *response_type, + unsigned int *data, struct qtee_shm *in_shm, + struct qtee_shm *out_shm) +{ + int ret = 0; + + switch (cmd) { + case SMCINVOKE_INVOKE_CMD_LEGACY: + qtee_shmbridge_flush_shm_buf(in_shm); + qtee_shmbridge_flush_shm_buf(out_shm); + ret = qcom_scm_invoke_smc_legacy(in_paddr, in_buf_len, out_paddr, out_buf_len, + result, response_type, data); + qtee_shmbridge_inv_shm_buf(in_shm); + qtee_shmbridge_inv_shm_buf(out_shm); + break; + + case SMCINVOKE_INVOKE_CMD: + ret = qcom_scm_invoke_smc(in_paddr, in_buf_len, out_paddr, out_buf_len, + result, response_type, data); + break; + + case SMCINVOKE_CB_RSP_CMD: + ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len, + result, response_type, data); + break; + + default: + ret = -EINVAL; + break; + } + + trace_invoke_cmd_handler(cmd, *response_type, *result, ret); + return ret; +} +/* + * Buf should be aligned to struct smcinvoke_tzcb_req + */ +static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp) +{ + /* ret is going to TZ. Provide values from OBJECT_ERROR_<> */ + int ret = OBJECT_ERROR_DEFUNCT; + int cbobj_retries = 0; + long timeout_jiff; + struct smcinvoke_cb_txn *cb_txn = NULL; + struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL; + struct smcinvoke_server_info *srvr_info = NULL; + + if (buf_len < sizeof(struct smcinvoke_tzcb_req)) { + pr_err("smaller buffer length : %u\n", buf_len); + return; + } + + cb_req = buf; + + /* check whether it is to be served by kernel or userspace */ + if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) { + return process_kernel_obj(buf, buf_len); + } else if (TZHANDLE_IS_MEM_OBJ(cb_req->hdr.tzhandle)) { + return process_mem_obj(buf, buf_len); + } else if (!TZHANDLE_IS_CB_OBJ(cb_req->hdr.tzhandle)) { + pr_err("Request object is not a callback object\n"); + cb_req->result = OBJECT_ERROR_INVALID; + return; + } + + /* + * We need a copy of req that could be sent to server. Otherwise, if + * someone kills invoke caller, buf would go away and server would be + * working on already freed buffer, causing a device crash. 
+ */ + tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL); + if (!tmp_cb_req) { + /* we need to return error to caller so fill up result */ + cb_req->result = OBJECT_ERROR_KMEM; + pr_err("failed to create copy of request, set result: %d\n", + cb_req->result); + return; + } + + cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL); + if (!cb_txn) { + cb_req->result = OBJECT_ERROR_KMEM; + pr_err("failed to allocate memory for request, result: %d\n", + cb_req->result); + kfree(tmp_cb_req); + return; + } + /* no need for memcpy as we did kmemdup() above */ + cb_req = tmp_cb_req; + + trace_process_tzcb_req_handle(cb_req->hdr.tzhandle, cb_req->hdr.op, cb_req->hdr.counts); + + cb_txn->state = SMCINVOKE_REQ_PLACED; + cb_txn->cb_req = cb_req; + cb_txn->cb_req_bytes = buf_len; + cb_txn->filp_to_release = arr_filp; + kref_init(&cb_txn->ref_cnt); + + mutex_lock(&g_smcinvoke_lock); + ++cb_reqs_inflight; + srvr_info = get_cb_server_locked( + TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle)); + if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) { + /* ret equals Object_ERROR_DEFUNCT, at this point go to out */ + if (!srvr_info) + pr_err("server is invalid\n"); + else { + pr_err("server is defunct, state= %d tzhandle = %d\n", + srvr_info->state, cb_req->hdr.tzhandle); + } + mutex_unlock(&g_smcinvoke_lock); + goto out; + } + + cb_txn->txn_id = ++srvr_info->txn_id; + hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id); + mutex_unlock(&g_smcinvoke_lock); + + trace_process_tzcb_req_wait(cb_req->hdr.tzhandle, cbobj_retries, cb_txn->txn_id, + current->pid, current->tgid, srvr_info->state, srvr_info->server_id, + cb_reqs_inflight); + /* + * we need not worry that server_info will be deleted because as long + * as this CBObj is served by this server, srvr_info will be valid. + */ + wake_up_interruptible_all(&srvr_info->req_wait_q); + /* timeout before 1s otherwise tzbusy would come */ + timeout_jiff = msecs_to_jiffies(1000); + + while (cbobj_retries < CBOBJ_MAX_RETRIES) { + ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q, + (cb_txn->state == SMCINVOKE_REQ_PROCESSED) || + (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT), + timeout_jiff); + + if (ret == 0) { + pr_err("CBobj timed out cb-tzhandle:%d, retry:%d, op:%d counts :%d\n", + cb_req->hdr.tzhandle, cbobj_retries, + cb_req->hdr.op, cb_req->hdr.counts); + pr_err("CBobj %d timedout pid %x,tid %x, srvr state=%d, srvr id:%u\n", + cb_req->hdr.tzhandle, current->pid, + current->tgid, srvr_info->state, + srvr_info->server_id); + } else { + break; + } + cbobj_retries++; + } + +out: + /* + * we could be here because of either: + * a. Req is PROCESSED + * b. Server was killed + * c. Invoke thread is killed + * sometime invoke thread and server are part of same process. + */ + mutex_lock(&g_smcinvoke_lock); + hash_del(&cb_txn->hash); + if (ret == 0) { + pr_err("CBObj timed out! No more retries\n"); + cb_req->result = Object_ERROR_TIMEOUT; + } else if (ret == -ERESTARTSYS) { + pr_err("wait event interruped, ret: %d\n", ret); + cb_req->result = OBJECT_ERROR_ABORT; + } else { + if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) { + /* + * it is possible that server was killed immediately + * after CB Req was processed but who cares now! 
+ */ + } else if (!srvr_info || + srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) { + cb_req->result = OBJECT_ERROR_DEFUNCT; + pr_err("server invalid, res: %d\n", cb_req->result); + } else { + pr_err("%s: unexpected event happened, ret:%d\n", __func__, ret); + cb_req->result = OBJECT_ERROR_ABORT; + } + } + --cb_reqs_inflight; + + trace_process_tzcb_req_result(cb_req->result, cb_req->hdr.tzhandle, cb_req->hdr.op, + cb_req->hdr.counts, cb_reqs_inflight); + + memcpy(buf, cb_req, buf_len); + kref_put(&cb_txn->ref_cnt, delete_cb_txn); + if (srvr_info) + kref_put(&srvr_info->ref_cnt, destroy_cb_server); + mutex_unlock(&g_smcinvoke_lock); +} + +static int marshal_out_invoke_req(const uint8_t *buf, uint32_t buf_size, + struct smcinvoke_cmd_req *req, + union smcinvoke_arg *args_buf, + uint32_t context_type) +{ + int ret = -EINVAL, i = 0; + int32_t temp_fd = UHANDLE_NULL; + union smcinvoke_tz_args *tz_args = NULL; + size_t offset = sizeof(struct smcinvoke_msg_hdr) + + OBJECT_COUNTS_TOTAL(req->counts) * + sizeof(union smcinvoke_tz_args); + + if (offset > buf_size) + goto out; + + tz_args = (union smcinvoke_tz_args *) + (buf + sizeof(struct smcinvoke_msg_hdr)); + + tz_args += OBJECT_COUNTS_NUM_BI(req->counts); + + if (args_buf == NULL) + return 0; + + FOR_ARGS(i, req->counts, BO) { + args_buf[i].b.size = tz_args->b.size; + if ((buf_size - tz_args->b.offset < tz_args->b.size) || + tz_args->b.offset > buf_size) { + pr_err("%s: buffer overflow detected\n", __func__); + goto out; + } + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + if (copy_to_user((void __user *) + (uintptr_t)(args_buf[i].b.addr), + (uint8_t *)(buf) + tz_args->b.offset, + tz_args->b.size)) { + pr_err("Error %d copying ctxt to user\n", ret); + goto out; + } + } else { + memcpy((uint8_t *)(args_buf[i].b.addr), + (uint8_t *)(buf) + tz_args->b.offset, + tz_args->b.size); + } + tz_args++; + } + tz_args += OBJECT_COUNTS_NUM_OI(req->counts); + + FOR_ARGS(i, req->counts, OO) { + /* + * create a new FD and assign to output object's context. + * We are passing cb_server_fd from output param in case OO + * is a CBObj. For CBObj, we have to ensure that it is sent + * to server who serves it and that info comes from USpace. 
+ */ + temp_fd = UHANDLE_NULL; + + ret = get_uhandle_from_tzhandle(tz_args->handle, + TZHANDLE_GET_SERVER(tz_args->handle), + &temp_fd, NO_LOCK, context_type); + + args_buf[i].o.fd = temp_fd; + + if (ret) + goto out; + + trace_marshal_out_invoke_req(i, tz_args->handle, + TZHANDLE_GET_SERVER(tz_args->handle), temp_fd); + + tz_args++; + } + ret = 0; +out: + return ret; +} + +static bool is_inbound_req(int val) +{ + return (val == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED || + val == QSEOS_RESULT_INCOMPLETE || + val == QSEOS_RESULT_BLOCKED_ON_LISTENER); +} + +static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr, + size_t in_buf_len, + uint8_t *out_buf, phys_addr_t out_paddr, + size_t out_buf_len, + struct smcinvoke_cmd_req *req, + union smcinvoke_arg *args_buf, + bool *tz_acked, uint32_t context_type, + struct qtee_shm *in_shm, struct qtee_shm *out_shm) +{ + int ret = 0, cmd, retry_count = 0; + u64 response_type; + unsigned int data; + struct file *arr_filp[OBJECT_COUNTS_MAX_OO] = {NULL}; + + *tz_acked = false; + /* buf size should be page aligned */ + if ((in_buf_len % PAGE_SIZE) != 0 || (out_buf_len % PAGE_SIZE) != 0) + return -EINVAL; + + cmd = invoke_cmd; + /* + * purpose of lock here is to ensure that any CB obj that may be going + * to user as OO is not released by piggyback message on another invoke + * request. We should not move this lock to process_invoke_req() because + * that will either cause deadlock or prevent any other invoke request + * to come in. We release this lock when either + * a) TZ requires HLOS action to complete ongoing invoke operation + * b) Final response to invoke has been marshalled out + */ + while (1) { + mutex_lock(&g_smcinvoke_lock); + + do { + ret = invoke_cmd_handler(cmd, in_paddr, in_buf_len, out_buf, + out_paddr, out_buf_len, &req->result, + &response_type, &data, in_shm, out_shm); + + if (ret == -EBUSY) { + pr_err("Secure side is busy,will retry after 30 ms\n"); + mutex_unlock(&g_smcinvoke_lock); + msleep(SMCINVOKE_SCM_EBUSY_WAIT_MS); + mutex_lock(&g_smcinvoke_lock); + } + + } while ((ret == -EBUSY) && + (retry_count++ < SMCINVOKE_SCM_EBUSY_MAX_RETRY)); + + if (!ret && !is_inbound_req(response_type)) { + /* dont marshal if Obj returns an error */ + if (!req->result) { + if (args_buf != NULL) + ret = marshal_out_invoke_req(in_buf, + in_buf_len, req, args_buf, + context_type); + } + *tz_acked = true; + } + mutex_unlock(&g_smcinvoke_lock); + + if (cmd == SMCINVOKE_CB_RSP_CMD) + release_filp(arr_filp, OBJECT_COUNTS_MAX_OO); + + if (ret || !is_inbound_req(response_type)) + break; + + /* process listener request */ + if (response_type == QSEOS_RESULT_INCOMPLETE || + response_type == QSEOS_RESULT_BLOCKED_ON_LISTENER) { + ret = qseecom_process_listener_from_smcinvoke( + &req->result, &response_type, &data); + + trace_prepare_send_scm_msg(response_type, req->result); + + if (!req->result && + response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) { + ret = marshal_out_invoke_req(in_buf, + in_buf_len, req, args_buf, + context_type); + } + *tz_acked = true; + } + + /* + * qseecom does not understand smcinvoke's callback object && + * erringly sets ret value as -EINVAL :( We need to handle it. 
+ */ + if (response_type != SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) + break; + + if (response_type == SMCINVOKE_RESULT_INBOUND_REQ_NEEDED) { + trace_status(__func__, "looks like inbnd req reqd"); + process_tzcb_req(out_buf, out_buf_len, arr_filp); + cmd = SMCINVOKE_CB_RSP_CMD; + } + } + return ret; +} +/* + * SMC expects arguments in following format + * --------------------------------------------------------------------------- + * | cxt | op | counts | ptr|size |ptr|size...|ORef|ORef|...| rest of payload | + * --------------------------------------------------------------------------- + * cxt: target, op: operation, counts: total arguments + * offset: offset is from beginning of buffer i.e. cxt + * size: size is 8 bytes aligned value + */ +static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req, + const union smcinvoke_arg *args_buf) +{ + uint32_t i = 0; + + size_t total_size = sizeof(struct smcinvoke_msg_hdr) + + OBJECT_COUNTS_TOTAL(req->counts) * + sizeof(union smcinvoke_tz_args); + + /* Computed total_size should be 8 bytes aligned from start of buf */ + total_size = ALIGN(total_size, SMCINVOKE_ARGS_ALIGN_SIZE); + + /* each buffer has to be 8 bytes aligned */ + while (i < OBJECT_COUNTS_NUM_buffers(req->counts)) + total_size = size_add(total_size, + size_align(args_buf[i++].b.size, + SMCINVOKE_ARGS_ALIGN_SIZE)); + + return PAGE_ALIGN(total_size); +} + +static int marshal_in_invoke_req(const struct smcinvoke_cmd_req *req, + const union smcinvoke_arg *args_buf, uint32_t tzhandle, + uint8_t *buf, size_t buf_size, struct file **arr_filp, + int32_t *tzhandles_to_release, uint32_t context_type) +{ + int ret = -EINVAL, i = 0, j = 0, k = 0; + const struct smcinvoke_msg_hdr msg_hdr = { + tzhandle, req->op, req->counts}; + uint32_t offset = sizeof(struct smcinvoke_msg_hdr) + + sizeof(union smcinvoke_tz_args) * + OBJECT_COUNTS_TOTAL(req->counts); + union smcinvoke_tz_args *tz_args = NULL; + + if (buf_size < offset) + goto out; + + *(struct smcinvoke_msg_hdr *)buf = msg_hdr; + tz_args = (union smcinvoke_tz_args *)(buf + + sizeof(struct smcinvoke_msg_hdr)); + + if (args_buf == NULL) + return 0; + + FOR_ARGS(i, req->counts, BI) { + offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE); + if ((offset > buf_size) || + (args_buf[i].b.size > (buf_size - offset))) + goto out; + + tz_args[i].b.offset = offset; + tz_args[i].b.size = args_buf[i].b.size; + if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) { + if (copy_from_user(buf + offset, + (void __user *)(uintptr_t)(args_buf[i].b.addr), + args_buf[i].b.size)) + goto out; + } else { + memcpy(buf + offset, (void *)(args_buf[i].b.addr), + args_buf[i].b.size); + } + offset += args_buf[i].b.size; + } + FOR_ARGS(i, req->counts, BO) { + offset = size_align(offset, SMCINVOKE_ARGS_ALIGN_SIZE); + if ((offset > buf_size) || + (args_buf[i].b.size > (buf_size - offset))) + goto out; + + tz_args[i].b.offset = offset; + tz_args[i].b.size = args_buf[i].b.size; + offset += args_buf[i].b.size; + } + FOR_ARGS(i, req->counts, OI) { + ret = get_tzhandle_from_uhandle(args_buf[i].o.fd, + args_buf[i].o.cb_server_fd, &arr_filp[j++], + &(tz_args[i].handle)); + if (ret) + goto out; + + trace_marshal_in_invoke_req(i, args_buf[i].o.fd, + args_buf[i].o.cb_server_fd, tz_args[i].handle); + + tzhandles_to_release[k++] = tz_args[i].handle; + } + ret = 0; +out: + return ret; +} + +static int marshal_in_tzcb_req(const struct smcinvoke_cb_txn *cb_txn, + struct smcinvoke_accept *user_req, int srvr_id) +{ + int ret = 0, i = 0; + int32_t temp_fd = UHANDLE_NULL; + union 
smcinvoke_arg tmp_arg; + struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req; + union smcinvoke_tz_args *tz_args = tzcb_req->args; + size_t tzcb_req_len = cb_txn->cb_req_bytes; + size_t tz_buf_offset = TZCB_BUF_OFFSET(tzcb_req); + size_t user_req_buf_offset = sizeof(union smcinvoke_arg) * + OBJECT_COUNTS_TOTAL(tzcb_req->hdr.counts); + + if (tz_buf_offset > tzcb_req_len) { + ret = -EINVAL; + goto out; + } + + user_req->txn_id = cb_txn->txn_id; + if (get_uhandle_from_tzhandle(tzcb_req->hdr.tzhandle, srvr_id, + &user_req->cbobj_id, TAKE_LOCK, + SMCINVOKE_OBJ_TYPE_TZ_OBJ)) { + ret = -EINVAL; + goto out; + } + user_req->op = tzcb_req->hdr.op; + user_req->counts = tzcb_req->hdr.counts; + user_req->argsize = sizeof(union smcinvoke_arg); + + trace_marshal_in_tzcb_req_handle(tzcb_req->hdr.tzhandle, srvr_id, + user_req->cbobj_id, user_req->op, user_req->counts); + + FOR_ARGS(i, tzcb_req->hdr.counts, BI) { + user_req_buf_offset = size_align(user_req_buf_offset, + SMCINVOKE_ARGS_ALIGN_SIZE); + tmp_arg.b.size = tz_args[i].b.size; + if ((tz_args[i].b.offset > tzcb_req_len) || + (tz_args[i].b.size > tzcb_req_len - tz_args[i].b.offset) || + (user_req_buf_offset > user_req->buf_len) || + (tmp_arg.b.size > + user_req->buf_len - user_req_buf_offset)) { + ret = -EINVAL; + pr_err("%s: buffer overflow detected\n", __func__); + goto out; + } + tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset; + + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg)) || + copy_to_user(u64_to_user_ptr(tmp_arg.b.addr), + (uint8_t *)(tzcb_req) + tz_args[i].b.offset, + tz_args[i].b.size)) { + ret = -EFAULT; + goto out; + } + user_req_buf_offset += tmp_arg.b.size; + } + FOR_ARGS(i, tzcb_req->hdr.counts, BO) { + user_req_buf_offset = size_align(user_req_buf_offset, + SMCINVOKE_ARGS_ALIGN_SIZE); + + tmp_arg.b.size = tz_args[i].b.size; + if ((user_req_buf_offset > user_req->buf_len) || + (tmp_arg.b.size > + user_req->buf_len - user_req_buf_offset)) { + ret = -EINVAL; + pr_err("%s: buffer overflow detected\n", __func__); + goto out; + } + tmp_arg.b.addr = user_req->buf_addr + user_req_buf_offset; + + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg))) { + ret = -EFAULT; + goto out; + } + user_req_buf_offset += tmp_arg.b.size; + } + FOR_ARGS(i, tzcb_req->hdr.counts, OI) { + /* + * create a new FD and assign to output object's + * context + */ + temp_fd = UHANDLE_NULL; + + ret = get_uhandle_from_tzhandle(tz_args[i].handle, srvr_id, + &temp_fd, TAKE_LOCK, SMCINVOKE_OBJ_TYPE_TZ_OBJ); + + tmp_arg.o.fd = temp_fd; + + if (ret) { + ret = -EINVAL; + goto out; + } + if (copy_to_user(u64_to_user_ptr + (user_req->buf_addr + i * sizeof(tmp_arg)), + &tmp_arg, sizeof(tmp_arg))) { + ret = -EFAULT; + goto out; + } + + trace_marshal_in_tzcb_req_fd(i, tz_args[i].handle, srvr_id, temp_fd); + } +out: + return ret; +} + +static int marshal_out_tzcb_req(const struct smcinvoke_accept *user_req, + struct smcinvoke_cb_txn *cb_txn, + struct file **arr_filp) +{ + int ret = -EINVAL, i = 0; + int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0}; + struct smcinvoke_tzcb_req *tzcb_req = cb_txn->cb_req; + union smcinvoke_tz_args *tz_args = tzcb_req->args; + + release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1); + tzcb_req->result = user_req->result; + FOR_ARGS(i, tzcb_req->hdr.counts, BO) { + union smcinvoke_arg tmp_arg; + + if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr( + user_req->buf_addr + i * sizeof(union smcinvoke_arg)), + 
sizeof(union smcinvoke_arg))) { + ret = -EFAULT; + goto out; + } + if (tmp_arg.b.size > tz_args[i].b.size) + goto out; + if (copy_from_user((uint8_t *)(tzcb_req) + tz_args[i].b.offset, + u64_to_user_ptr(tmp_arg.b.addr), + tmp_arg.b.size)) { + ret = -EFAULT; + goto out; + } + } + + FOR_ARGS(i, tzcb_req->hdr.counts, OO) { + union smcinvoke_arg tmp_arg; + + if (copy_from_user((uint8_t *)&tmp_arg, u64_to_user_ptr( + user_req->buf_addr + i * sizeof(union smcinvoke_arg)), + sizeof(union smcinvoke_arg))) { + ret = -EFAULT; + goto out; + } + ret = get_tzhandle_from_uhandle(tmp_arg.o.fd, + tmp_arg.o.cb_server_fd, &arr_filp[i], + &(tz_args[i].handle)); + if (ret) + goto out; + tzhandles_to_release[i] = tz_args[i].handle; + + trace_marshal_out_tzcb_req(i, tmp_arg.o.fd, + tmp_arg.o.cb_server_fd, tz_args[i].handle); + } + FOR_ARGS(i, tzcb_req->hdr.counts, OI) { + if (TZHANDLE_IS_CB_OBJ(tz_args[i].handle)) + release_tzhandles(&tz_args[i].handle, 1); + } + ret = 0; +out: + if (ret) + release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO); + return ret; +} + +static void process_piggyback_data(void *buf, size_t buf_size) +{ + int i; + struct smcinvoke_tzcb_req req = {0}; + struct smcinvoke_piggyback_msg *msg = buf; + int32_t *objs = msg->objs; + + for (i = 0; i < msg->counts; i++) { + req.hdr.op = msg->op; + req.hdr.counts = 0; /* release op does not require any args */ + req.hdr.tzhandle = objs[i]; + process_tzcb_req(&req, sizeof(struct smcinvoke_tzcb_req), NULL); + /* cbobjs_in_flight will be adjusted during CB processing */ + } +} + + +static long process_ack_local_obj(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + int32_t local_obj = SMCINVOKE_USERSPACE_OBJ_NULL; + struct smcinvoke_file_data *filp_data = filp->private_data; + + if (_IOC_SIZE(cmd) != sizeof(int32_t)) + return -EINVAL; + + ret = copy_from_user(&local_obj, (void __user *)(uintptr_t)arg, + sizeof(int32_t)); + if (ret) + return -EFAULT; + + mutex_lock(&g_smcinvoke_lock); + if (UHANDLE_IS_CB_OBJ(local_obj)) + ret = put_pending_cbobj_locked(filp_data->server_id, + UHANDLE_GET_CB_OBJ(local_obj)); + mutex_unlock(&g_smcinvoke_lock); + + return ret; +} + +static long process_server_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + int32_t server_fd = -1; + struct smcinvoke_server server_req = {0}; + struct smcinvoke_server_info *server_info = NULL; + + if (_IOC_SIZE(cmd) != sizeof(server_req)) { + pr_err("invalid command size received for server request\n"); + return -EINVAL; + } + ret = copy_from_user(&server_req, (void __user *)(uintptr_t)arg, + sizeof(server_req)); + if (ret) { + pr_err("copying server request from user failed\n"); + return -EFAULT; + } + server_info = kzalloc(sizeof(*server_info), GFP_KERNEL); + if (!server_info) + return -ENOMEM; + + kref_init(&server_info->ref_cnt); + init_waitqueue_head(&server_info->req_wait_q); + init_waitqueue_head(&server_info->rsp_wait_q); + server_info->cb_buf_size = server_req.cb_buf_size; + hash_init(server_info->reqs_table); + hash_init(server_info->responses_table); + INIT_LIST_HEAD(&server_info->pending_cbobjs); + + mutex_lock(&g_smcinvoke_lock); + + server_info->server_id = next_cb_server_id_locked(); + hash_add(g_cb_servers, &server_info->hash, + server_info->server_id); + if (g_max_cb_buf_size < server_req.cb_buf_size) + g_max_cb_buf_size = server_req.cb_buf_size; + + mutex_unlock(&g_smcinvoke_lock); + ret = get_fd_for_obj(SMCINVOKE_OBJ_TYPE_SERVER, + server_info->server_id, &server_fd); + + if (ret) + 
release_cb_server(server_info->server_id); + + return server_fd; +} + +static long process_accept_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1; + struct smcinvoke_file_data *server_obj = filp->private_data; + struct smcinvoke_accept user_args = {0}; + struct smcinvoke_cb_txn *cb_txn = NULL; + struct smcinvoke_server_info *server_info = NULL; + + if (_IOC_SIZE(cmd) != sizeof(struct smcinvoke_accept)) { + pr_err("command size invalid for accept request\n"); + return -EINVAL; + } + + if (copy_from_user(&user_args, (void __user *)arg, + sizeof(struct smcinvoke_accept))) { + pr_err("copying accept request from user failed\n"); + return -EFAULT; + } + + if (user_args.argsize != sizeof(union smcinvoke_arg)) { + pr_err("arguments size is invalid for accept thread\n"); + return -EINVAL; + } + + /* ACCEPT is available only on server obj */ + if (server_obj->context_type != SMCINVOKE_OBJ_TYPE_SERVER) { + pr_err("invalid object type received for accept req\n"); + return -EPERM; + } + + mutex_lock(&g_smcinvoke_lock); + server_info = get_cb_server_locked(server_obj->server_id); + + if (!server_info) { + pr_err("No matching server with server id : %u found\n", + server_obj->server_id); + mutex_unlock(&g_smcinvoke_lock); + return -EINVAL; + } + + if (server_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) + server_info->state = 0; + + mutex_unlock(&g_smcinvoke_lock); + + /* First check if it has response otherwise wait for req */ + if (user_args.has_resp) { + trace_process_accept_req_has_response(current->pid, current->tgid); + + mutex_lock(&g_smcinvoke_lock); + cb_txn = find_cbtxn_locked(server_info, user_args.txn_id, + SMCINVOKE_REQ_PROCESSING); + mutex_unlock(&g_smcinvoke_lock); + /* + * cb_txn can be null if userspace provides wrong txn id OR + * invoke thread died while server was processing cb req. + * if invoke thread dies, it would remove req from Q. So + * no matching cb_txn would be on Q and hence NULL cb_txn. + * In this case, we want this thread to come back and start + * waiting for new cb requests, hence return EAGAIN here + */ + if (!cb_txn) { + pr_err("%s txn %d either invalid or removed from Q\n", + __func__, user_args.txn_id); + ret = -EAGAIN; + goto out; + } + ret = marshal_out_tzcb_req(&user_args, cb_txn, + cb_txn->filp_to_release); + /* + * if client did not set error and we get error locally, + * we return local error to TA + */ + if (ret && cb_txn->cb_req->result == 0) + cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; + + cb_txn->state = SMCINVOKE_REQ_PROCESSED; + kref_put(&cb_txn->ref_cnt, delete_cb_txn); + wake_up(&server_info->rsp_wait_q); + /* + * if marshal_out fails, we should let userspace release + * any ref/obj it created for CB processing + */ + if (ret && OBJECT_COUNTS_NUM_OO(user_args.counts)) + goto out; + } + /* + * Once response has been delivered, thread will wait for another + * callback req to process. + */ + do { + ret = wait_event_interruptible(server_info->req_wait_q, + !hash_empty(server_info->reqs_table)); + if (ret) { + trace_process_accept_req_ret(current->pid, current->tgid, ret); + /* + * Ideally, we should destroy server if accept threads + * are returning due to client being killed or device + * going down (Shutdown/Reboot) but that would make + * server_info invalid. Other accept/invoke threads are + * using server_info and would crash. So dont do that. 
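+ * Instead, mark the server DEFUNCT and wake any response waiters; a
+ * later accept call clears the DEFUNCT state (see the check above).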
+ */ + mutex_lock(&g_smcinvoke_lock); + server_info->state = SMCINVOKE_SERVER_STATE_DEFUNCT; + mutex_unlock(&g_smcinvoke_lock); + wake_up_interruptible(&server_info->rsp_wait_q); + goto out; + } + mutex_lock(&g_smcinvoke_lock); + cb_txn = find_cbtxn_locked(server_info, + SMCINVOKE_NEXT_AVAILABLE_TXN, + SMCINVOKE_REQ_PLACED); + mutex_unlock(&g_smcinvoke_lock); + if (cb_txn) { + cb_txn->state = SMCINVOKE_REQ_PROCESSING; + ret = marshal_in_tzcb_req(cb_txn, &user_args, + server_obj->server_id); + if (ret) { + pr_err("failed to marshal in the callback request\n"); + cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; + cb_txn->state = SMCINVOKE_REQ_PROCESSED; + kref_put(&cb_txn->ref_cnt, delete_cb_txn); + wake_up_interruptible(&server_info->rsp_wait_q); + continue; + } + mutex_lock(&g_smcinvoke_lock); + hash_add(server_info->responses_table, &cb_txn->hash, + cb_txn->txn_id); + kref_put(&cb_txn->ref_cnt, delete_cb_txn); + mutex_unlock(&g_smcinvoke_lock); + + trace_process_accept_req_placed(current->pid, current->tgid); + + ret = copy_to_user((void __user *)arg, &user_args, + sizeof(struct smcinvoke_accept)); + } + } while (!cb_txn); +out: + if (server_info) + kref_put(&server_info->ref_cnt, destroy_cb_server); + + if (ret && ret != -ERESTARTSYS) + pr_err("accept thread returning with ret: %d\n", ret); + + return ret; +} + +static long process_invoke_req(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = -1, nr_args = 0; + struct smcinvoke_cmd_req req = {0}; + void *in_msg = NULL, *out_msg = NULL; + size_t inmsg_size = 0, outmsg_size = SMCINVOKE_TZ_MIN_BUF_SIZE; + union smcinvoke_arg *args_buf = NULL; + struct smcinvoke_file_data *tzobj = filp->private_data; + struct qtee_shm in_shm = {0}, out_shm = {0}; + + /* + * Hold reference to remote object until invoke op is not + * completed. Release once invoke is done. + */ + struct file *filp_to_release[OBJECT_COUNTS_MAX_OO] = {NULL}; + /* + * If anything goes wrong, release alloted tzhandles for + * local objs which could be either CBObj or MemObj. 
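+ * Once TZ acknowledges the invoke it owns those references, so the
+ * list is cleared below and nothing is released on the error path.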
+ */ + int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0}; + bool tz_acked = false; + uint32_t context_type = tzobj->context_type; + + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ && + _IOC_SIZE(cmd) != sizeof(req)) { + pr_err("command size for invoke req is invalid\n"); + return -EINVAL; + } + + if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ && + context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) { + pr_err("invalid context_type %d\n", context_type); + return -EPERM; + } + if (context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL) { + ret = copy_from_user(&req, (void __user *)arg, sizeof(req)); + if (ret) { + pr_err("copying invoke req failed\n"); + return -EFAULT; + } + } else { + req = *(struct smcinvoke_cmd_req *)arg; + } + if (req.argsize != sizeof(union smcinvoke_arg)) { + pr_err("arguments size for invoke req is invalid\n"); + return -EINVAL; + } + + nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) + + OBJECT_COUNTS_NUM_objects(req.counts); + + if (nr_args) { + args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL); + if (!args_buf) + return -ENOMEM; + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + ret = copy_from_user(args_buf, + u64_to_user_ptr(req.args), + nr_args * req.argsize); + if (ret) { + ret = -EFAULT; + goto out; + } + } else { + memcpy(args_buf, (void *)(req.args), + nr_args * req.argsize); + } + } + + inmsg_size = compute_in_msg_size(&req, args_buf); + ret = qtee_shmbridge_allocate_shm(inmsg_size, &in_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for in msg in invoke req\n"); + goto out; + } + in_msg = in_shm.vaddr; + + mutex_lock(&g_smcinvoke_lock); + outmsg_size = PAGE_ALIGN(g_max_cb_buf_size); + mutex_unlock(&g_smcinvoke_lock); + ret = qtee_shmbridge_allocate_shm(outmsg_size, &out_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for out msg in invoke req\n"); + goto out; + } + out_msg = out_shm.vaddr; + + trace_process_invoke_req_tzhandle(tzobj->tzhandle, req.op, req.counts); + + ret = marshal_in_invoke_req(&req, args_buf, tzobj->tzhandle, in_msg, + inmsg_size, filp_to_release, tzhandles_to_release, + context_type); + if (ret) { + pr_err("failed to marshal in invoke req, ret :%d\n", ret); + goto out; + } + + ret = prepare_send_scm_msg(in_msg, in_shm.paddr, inmsg_size, + out_msg, out_shm.paddr, outmsg_size, + &req, args_buf, &tz_acked, context_type, + &in_shm, &out_shm); + + /* + * If scm_call is success, TZ owns responsibility to release + * refs for local objs. + */ + if (!tz_acked) { + trace_status(__func__, "scm call successful"); + goto out; + } + memset(tzhandles_to_release, 0, sizeof(tzhandles_to_release)); + + /* + * if invoke op results in an err, no need to marshal_out and + * copy args buf to user space + */ + if (!req.result) { + /* + * Dont check ret of marshal_out because there might be a + * FD for OO which userspace must release even if an error + * occurs. Releasing FD from user space is much simpler than + * doing here. ORing of ret is reqd not to miss past error + */ + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) + ret |= copy_to_user(u64_to_user_ptr(req.args), + args_buf, nr_args * req.argsize); + else + memcpy((void *)(req.args), args_buf, + nr_args * req.argsize); + + } + /* copy result of invoke op */ + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ) { + ret |= copy_to_user((void __user *)arg, &req, sizeof(req)); + if (ret) + goto out; + } else { + memcpy((void *)arg, (void *)&req, sizeof(req)); + } + + /* Outbuf could be carrying local objs to be released. 
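+ * process_piggyback_data() walks those objects and issues the
+ * corresponding release callbacks.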
*/ + process_piggyback_data(out_msg, outmsg_size); +out: + trace_process_invoke_req_result(ret, req.result, tzobj->tzhandle, + req.op, req.counts); + + release_filp(filp_to_release, OBJECT_COUNTS_MAX_OO); + if (ret) + release_tzhandles(tzhandles_to_release, OBJECT_COUNTS_MAX_OO); + qtee_shmbridge_free_shm(&in_shm); + qtee_shmbridge_free_shm(&out_shm); + kfree(args_buf); + + if (ret) + pr_err("invoke thread returning with ret = %d\n", ret); + + return ret; +} + +static long process_log_info(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + char buf[SMCINVOKE_LOG_BUF_SIZE]; + struct smcinvoke_file_data *tzobj = filp->private_data; + + ret = copy_from_user(buf, (void __user *)arg, SMCINVOKE_LOG_BUF_SIZE); + if (ret) { + pr_err("logging HLOS info copy failed\n"); + return -EFAULT; + } + buf[SMCINVOKE_LOG_BUF_SIZE - 1] = '\0'; + + trace_process_log_info(buf, tzobj->context_type, tzobj->tzhandle); + + return ret; +} + +static long smcinvoke_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + + switch (cmd) { + case SMCINVOKE_IOCTL_INVOKE_REQ: + ret = process_invoke_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_ACCEPT_REQ: + ret = process_accept_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_SERVER_REQ: + ret = process_server_req(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_ACK_LOCAL_OBJ: + ret = process_ack_local_obj(filp, cmd, arg); + break; + case SMCINVOKE_IOCTL_LOG: + ret = process_log_info(filp, cmd, arg); + break; + default: + ret = -ENOIOCTLCMD; + break; + } + trace_smcinvoke_ioctl(cmd, ret); + return ret; +} + +int get_root_fd(int *root_fd) +{ + if (!root_fd) + return -EINVAL; + else + return get_fd_for_obj(SMCINVOKE_OBJ_TYPE_TZ_OBJ_FOR_KERNEL, + SMCINVOKE_TZ_ROOT_OBJ, root_fd); +} + +int process_invoke_request_from_kernel_client(int fd, + struct smcinvoke_cmd_req *req) +{ + struct file *filp = NULL; + int ret = 0; + + if (!req) { + pr_err("NULL req\n"); + return -EINVAL; + } + + filp = fget(fd); + if (!filp) { + pr_err("Invalid fd %d\n", fd); + return -EINVAL; + } + ret = process_invoke_req(filp, 0, (uintptr_t)req); + fput(filp); + trace_process_invoke_request_from_kernel_client(fd, filp, file_count(filp)); + return ret; +} + +char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm) +{ + + int rc = 0; + const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entry07 = NULL; + char fw_name[MAX_APP_NAME_SIZE] = "\0"; + int num_images = 0, phi = 0; + unsigned char app_arch = 0; + u8 *img_data_ptr = NULL; + size_t offset[8], bufferOffset = 0, phdr_table_offset = 0; + Elf32_Phdr phdr32; + Elf64_Phdr phdr64; + struct elf32_hdr *ehdr = NULL; + struct elf64_hdr *ehdr64 = NULL; + + + /* load b00*/ + snprintf(fw_name, sizeof(fw_name), "%s.b00", appname); + rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev); + if (rc) { + pr_err("Load %s failed, ret:%d\n", fw_name, rc); + return NULL; + } + + app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS); + + /*Get the offsets for split images header*/ + offset[0] = 0; + if (app_arch == ELFCLASS32) { + + ehdr = (struct elf32_hdr *)fw_entry00->data; + num_images = ehdr->e_phnum; + if (num_images != 8) { + pr_err("Number of images :%d is not valid\n", num_images); + goto release_fw_entry00; + } + phdr_table_offset = (size_t) ehdr->e_phoff; + for (phi = 1; phi < num_images; ++phi) { + bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr); + phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset); + offset[phi] 
= (size_t)phdr32.p_offset; + } + + } else if (app_arch == ELFCLASS64) { + + ehdr64 = (struct elf64_hdr *)fw_entry00->data; + num_images = ehdr64->e_phnum; + if (num_images != 8) { + pr_err("Number of images :%d is not valid\n", num_images); + goto release_fw_entry00; + } + phdr_table_offset = (size_t) ehdr64->e_phoff; + for (phi = 1; phi < num_images; ++phi) { + bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr); + phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset); + offset[phi] = (size_t)phdr64.p_offset; + } + + } else { + + pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch); + goto release_fw_entry00; + } + + /*Find the size of last split bin image*/ + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images-1); + rc = firmware_request_nowarn(&fw_entry07, fw_name, class_dev); + if (rc) { + pr_err("Failed to locate blob %s\n", fw_name); + goto release_fw_entry00; + } + + /*Total size of image will be the offset of last image + the size of last split image*/ + *fw_size = fw_entry07->size + offset[num_images-1]; + + /*Allocate memory for the buffer that will hold the split image*/ + rc = qtee_shmbridge_allocate_shm((*fw_size), shm); + if (rc) { + pr_err("smbridge alloc failed for size: %zu\n", *fw_size); + goto release_fw_entry07; + } + img_data_ptr = shm->vaddr; + + /* + * Copy contents of split bins to the buffer + */ + memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size); + for (phi = 1; phi < num_images-1; phi++) { + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi); + rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev); + if (rc) { + pr_err("Failed to locate blob %s\n", fw_name); + qtee_shmbridge_free_shm(shm); + img_data_ptr = NULL; + goto release_fw_entry07; + } + memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size); + release_firmware(fw_entry); + fw_entry = NULL; + } + memcpy(img_data_ptr + offset[phi], fw_entry07->data, fw_entry07->size); + +release_fw_entry07: + release_firmware(fw_entry07); +release_fw_entry00: + release_firmware(fw_entry00); + return img_data_ptr; +} +EXPORT_SYMBOL(firmware_request_from_smcinvoke); + +static int smcinvoke_open(struct inode *nodp, struct file *filp) +{ + struct smcinvoke_file_data *tzcxt = NULL; + + tzcxt = kzalloc(sizeof(*tzcxt), GFP_KERNEL); + if (!tzcxt) + return -ENOMEM; + + tzcxt->tzhandle = SMCINVOKE_TZ_ROOT_OBJ; + tzcxt->context_type = SMCINVOKE_OBJ_TYPE_TZ_OBJ; + filp->private_data = tzcxt; + + return 0; +} + +static int release_cb_server(uint16_t server_id) +{ + struct smcinvoke_server_info *server = NULL; + + mutex_lock(&g_smcinvoke_lock); + server = find_cb_server_locked(server_id); + if (server) + kref_put(&server->ref_cnt, destroy_cb_server); + mutex_unlock(&g_smcinvoke_lock); + return 0; +} + +int smcinvoke_release_filp(struct file *filp) +{ + int ret = 0; + bool release_handles; + uint8_t *in_buf = NULL; + uint8_t *out_buf = NULL; + struct smcinvoke_msg_hdr hdr = {0}; + struct smcinvoke_file_data *file_data = filp->private_data; + struct smcinvoke_cmd_req req = {0}; + uint32_t tzhandle = 0; + struct qtee_shm in_shm = {0}, out_shm = {0}; + + trace_smcinvoke_release_filp(current->files, filp, + file_count(filp), file_data->context_type); + + if (file_data->context_type == SMCINVOKE_OBJ_TYPE_SERVER) { + ret = release_cb_server(file_data->server_id); + goto out; + } + + tzhandle = file_data->tzhandle; + /* Root object is special in sense it is indestructible */ + if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) + goto out; + + ret = 
qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for in msg in release\n"); + goto out; + } + + ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for out msg in release\n"); + goto out; + } + + in_buf = in_shm.vaddr; + out_buf = out_shm.vaddr; + hdr.tzhandle = tzhandle; + hdr.op = OBJECT_OP_RELEASE; + hdr.counts = 0; + *(struct smcinvoke_msg_hdr *)in_buf = hdr; + + ret = prepare_send_scm_msg(in_buf, in_shm.paddr, + SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr, + SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, &release_handles, + file_data->context_type, &in_shm, &out_shm); + + process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE); +out: + kfree(filp->private_data); + filp->private_data = NULL; + qtee_shmbridge_free_shm(&in_shm); + qtee_shmbridge_free_shm(&out_shm); + + return ret; + +} + +int smcinvoke_release_from_kernel_client(int fd) +{ + struct file *filp = NULL; + + /* use fget() to get filp, but this will increase file ref_cnt to 1, + * then decrease file ref_cnt to 0 with fput(). + */ + filp = fget(fd); + if (!filp) { + pr_err("invalid fd %d to release\n", fd); + return -EINVAL; + } + trace_smcinvoke_release_from_kernel_client(current->files, filp, + file_count(filp)); + /* free filp, notify TZ to release object */ + smcinvoke_release_filp(filp); + fput(filp); + return 0; +} + +static int smcinvoke_release(struct inode *nodp, struct file *filp) +{ + trace_smcinvoke_release(current->files, filp, file_count(filp), + filp->private_data); + + if (filp->private_data) + return smcinvoke_release_filp(filp); + else + return 0; +} + +static int smcinvoke_probe(struct platform_device *pdev) +{ + unsigned int baseminor = 0; + unsigned int count = 1; + int rc = 0; + + rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count, + SMCINVOKE_DEV); + if (rc < 0) { + pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV); + return rc; + } + driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("class_create failed %d\n", rc); + goto exit_unreg_chrdev_region; + } + class_dev = device_create(driver_class, NULL, smcinvoke_device_no, + NULL, SMCINVOKE_DEV); + if (!class_dev) { + pr_err("class_device_create failed %d\n", rc); + rc = -ENOMEM; + goto exit_destroy_class; + } + + cdev_init(&smcinvoke_cdev, &g_smcinvoke_fops); + smcinvoke_cdev.owner = THIS_MODULE; + + rc = cdev_add(&smcinvoke_cdev, MKDEV(MAJOR(smcinvoke_device_no), 0), + count); + if (rc < 0) { + pr_err("cdev_add failed %d for %s\n", rc, SMCINVOKE_DEV); + goto exit_destroy_device; + } + smcinvoke_pdev = pdev; + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + pr_err("dma_set_mask_and_coherent failed %d\n", rc); + goto exit_destroy_device; + } + legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-legacy_smc"); + invoke_cmd = legacy_smc_call ? 
SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD; + + return 0; + +exit_destroy_device: + device_destroy(driver_class, smcinvoke_device_no); +exit_destroy_class: + class_destroy(driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(smcinvoke_device_no, count); + return rc; +} + +static int smcinvoke_remove(struct platform_device *pdev) +{ + int count = 1; + + cdev_del(&smcinvoke_cdev); + device_destroy(driver_class, smcinvoke_device_no); + class_destroy(driver_class); + unregister_chrdev_region(smcinvoke_device_no, count); + return 0; +} + +static int __maybe_unused smcinvoke_suspend(struct platform_device *pdev, + pm_message_t state) +{ + int ret = 0; + + mutex_lock(&g_smcinvoke_lock); + if (cb_reqs_inflight) { + pr_err("Failed to suspend smcinvoke driver\n"); + ret = -EIO; + } + mutex_unlock(&g_smcinvoke_lock); + return ret; +} + +static int __maybe_unused smcinvoke_resume(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id smcinvoke_match[] = { + { + .compatible = "qcom,smcinvoke", + }, + {}, +}; + +static struct platform_driver smcinvoke_plat_driver = { + .probe = smcinvoke_probe, + .remove = smcinvoke_remove, + .suspend = smcinvoke_suspend, + .resume = smcinvoke_resume, + .driver = { + .name = "smcinvoke", + .of_match_table = smcinvoke_match, + }, +}; + +static int smcinvoke_init(void) +{ + return platform_driver_register(&smcinvoke_plat_driver); +} + +static void smcinvoke_exit(void) +{ + platform_driver_unregister(&smcinvoke_plat_driver); +} + +module_init(smcinvoke_init); +module_exit(smcinvoke_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("SMC Invoke driver"); diff --git a/smcinvoke/smcinvoke.h b/smcinvoke/smcinvoke.h new file mode 100644 index 0000000000..7c3ff1b047 --- /dev/null +++ b/smcinvoke/smcinvoke.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + */ +#ifndef _UAPI_SMCINVOKE_H_ +#define _UAPI_SMCINVOKE_H_ + +#include +#include + +#define SMCINVOKE_USERSPACE_OBJ_NULL -1 + +struct smcinvoke_buf { + __u64 addr; + __u64 size; +}; + +struct smcinvoke_obj { + __s64 fd; + __s32 cb_server_fd; + __s32 reserved; +}; + +union smcinvoke_arg { + struct smcinvoke_buf b; + struct smcinvoke_obj o; +}; + +/* + * struct smcinvoke_cmd_req: This structure is transparently sent to TEE + * @op - Operation to be performed + * @counts - number of aruments passed + * @result - result of invoke operation + * @argsize - size of each of arguments + * @args - args is pointer to buffer having all arguments + */ +struct smcinvoke_cmd_req { + __u32 op; + __u32 counts; + __s32 result; + __u32 argsize; + __u64 args; +}; + +/* + * struct smcinvoke_accept: structure to process CB req from TEE + * @has_resp: IN: Whether IOCTL is carrying response data + * @txn_id: OUT: An id that should be passed as it is for response + * @result: IN: Outcome of operation op + * @cbobj_id: OUT: Callback object which is target of operation op + * @op: OUT: Operation to be performed on target object + * @counts: OUT: Number of arguments, embedded in buffer pointed by + * buf_addr, to complete operation + * @reserved: IN/OUT: Usage is not defined but should be set to 0. 
+ * @argsize: IN: Size of any argument, all of equal size, embedded + * in buffer pointed by buf_addr + * @buf_len: IN: Len of buffer pointed by buf_addr + * @buf_addr: IN: Buffer containing all arguments which are needed + * to complete operation op + */ +struct smcinvoke_accept { + __u32 has_resp; + __u32 txn_id; + __s32 result; + __s32 cbobj_id; + __u32 op; + __u32 counts; + __s32 reserved; + __u32 argsize; + __u64 buf_len; + __u64 buf_addr; +}; + +/* + * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client + */ +struct smcinvoke_server { + __u32 cb_buf_size; +}; + +#define SMCINVOKE_IOC_MAGIC 0x98 + +#define SMCINVOKE_IOCTL_INVOKE_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req) + +#define SMCINVOKE_IOCTL_ACCEPT_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept) + +#define SMCINVOKE_IOCTL_SERVER_REQ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server) + +#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \ + _IOWR(SMCINVOKE_IOC_MAGIC, 4, __s32) + +/* + * smcinvoke logging buffer is for communicating with the smcinvoke driver additional + * info for debugging to be included in driver's log (if any) + */ +#define SMCINVOKE_LOG_BUF_SIZE 100 +#define SMCINVOKE_IOCTL_LOG \ + _IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE) + +#endif /* _UAPI_SMCINVOKE_H_ */ diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c new file mode 100644 index 0000000000..c4e764d87b --- /dev/null +++ b/smcinvoke/smcinvoke_kernel.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ +#if !IS_ENABLED(CONFIG_QSEECOM) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "smcinvoke.h" +#include "linux/qseecom.h" +#include "smcinvoke_object.h" +#include "misc/qseecom_kernel.h" +#include "IQSEEComCompat.h" +#include "IQSEEComCompatAppLoader.h" +#include "IClientEnv.h" + +const uint32_t CQSEEComCompatAppLoader_UID = 122; + +struct qseecom_compat_context { + void *dev; /* in/out */ + unsigned char *sbuf; /* in/out */ + uint32_t sbuf_len; /* in/out */ + struct qtee_shm shm; + uint8_t app_arch; + struct Object client_env; + struct Object app_loader; + struct Object app_controller; +}; + +struct tzobject_context { + int fd; + struct kref refs; +}; + +static int invoke_over_smcinvoke(void *cxt, + uint32_t op, + union ObjectArg *args, + uint32_t counts); + +static struct Object tzobject_new(int fd) +{ + struct tzobject_context *me = + kzalloc(sizeof(struct tzobject_context), GFP_KERNEL); + if (!me) + return Object_NULL; + + kref_init(&me->refs); + me->fd = fd; + pr_debug("%s: me->fd = %d, me->refs = %u\n", __func__, + me->fd, kref_read(&me->refs)); + return (struct Object) { invoke_over_smcinvoke, me }; +} + +static void tzobject_delete(struct kref *refs) +{ + struct tzobject_context *me = container_of(refs, + struct tzobject_context, refs); + + pr_info("%s: me->fd = %d, me->refs = %d, files = %p\n", + __func__, me->fd, kref_read(&me->refs), current->files); + /* + * after _close_fd(), ref_cnt will be 0, + * but smcinvoke_release() was still not called, + * so we first call smcinvoke_release_from_kernel_client() to + * free filp and ask TZ to release object, then call _close_fd() + */ + smcinvoke_release_from_kernel_client(me->fd); + close_fd(me->fd); + kfree(me); +} + +int getObjectFromHandle(int handle, struct Object *obj) +{ + int ret = 0; + + if (handle == SMCINVOKE_USERSPACE_OBJ_NULL) { + /* NULL object*/ 
+ Object_ASSIGN_NULL(*obj); + } else if (handle > SMCINVOKE_USERSPACE_OBJ_NULL) { + *obj = tzobject_new(handle); + if (Object_isNull(*obj)) + ret = OBJECT_ERROR_BADOBJ; + } else { + pr_err("CBobj not supported for handle %d\n", handle); + ret = OBJECT_ERROR_BADOBJ; + } + + return ret; +} + +int getHandleFromObject(struct Object obj, int *handle) +{ + int ret = 0; + + if (Object_isNull(obj)) { + /* set NULL Object's fd to be -1 */ + *handle = SMCINVOKE_USERSPACE_OBJ_NULL; + return ret; + } + + if (obj.invoke == invoke_over_smcinvoke) { + struct tzobject_context *ctx = (struct tzobject_context *)(obj.context); + + if (ctx != NULL) { + *handle = ctx->fd; + } else { + pr_err("Failed to get tzobject_context obj handle, ret = %d\n", ret); + ret = OBJECT_ERROR_BADOBJ; + } + } else { + pr_err("CBobj not supported\n"); + ret = OBJECT_ERROR_BADOBJ; + } + + return ret; +} + +static int marshalIn(struct smcinvoke_cmd_req *req, + union smcinvoke_arg *argptr, + uint32_t op, union ObjectArg *args, + uint32_t counts) +{ + size_t i = 0; + + req->op = op; + req->counts = counts; + req->argsize = sizeof(union smcinvoke_arg); + req->args = (uintptr_t)argptr; + + FOR_ARGS(i, counts, buffers) { + argptr[i].b.addr = (uintptr_t) args[i].b.ptr; + argptr[i].b.size = args[i].b.size; + } + + FOR_ARGS(i, counts, OI) { + int handle = -1, ret; + + ret = getHandleFromObject(args[i].o, &handle); + if (ret) { + pr_err("invalid OI[%zu]\n", i); + return OBJECT_ERROR_BADOBJ; + } + argptr[i].o.fd = handle; + } + + FOR_ARGS(i, counts, OO) { + argptr[i].o.fd = SMCINVOKE_USERSPACE_OBJ_NULL; + } + return OBJECT_OK; +} + +static int marshalOut(struct smcinvoke_cmd_req *req, + union smcinvoke_arg *argptr, + union ObjectArg *args, uint32_t counts, + struct tzobject_context *me) +{ + int ret = req->result; + bool failed = false; + size_t i = 0; + + argptr = (union smcinvoke_arg *)(uintptr_t)(req->args); + + FOR_ARGS(i, counts, BO) { + args[i].b.size = argptr[i].b.size; + } + + FOR_ARGS(i, counts, OO) { + ret = getObjectFromHandle(argptr[i].o.fd, &(args[i].o)); + if (ret) { + pr_err("Failed to get OO[%zu] from handle = %d\n", + i, (int)argptr[i].o.fd); + failed = true; + break; + } + pr_debug("Succeed to create OO for args[%zu].o, fd = %d\n", + i, (int)argptr[i].o.fd); + } + if (failed) { + FOR_ARGS(i, counts, OO) { + Object_ASSIGN_NULL(args[i].o); + } + /* Only overwrite ret value if invoke result is 0 */ + if (ret == 0) + ret = OBJECT_ERROR_BADOBJ; + } + return ret; +} + +static int invoke_over_smcinvoke(void *cxt, + uint32_t op, + union ObjectArg *args, + uint32_t counts) +{ + int ret = OBJECT_OK; + struct smcinvoke_cmd_req req = {0, 0, 0, 0, 0}; + size_t i = 0; + struct tzobject_context *me = NULL; + uint32_t method; + union smcinvoke_arg *argptr = NULL; + + FOR_ARGS(i, counts, OO) { + args[i].o = Object_NULL; + } + + me = (struct tzobject_context *)cxt; + method = ObjectOp_methodID(op); + pr_debug("%s: cxt = %p, fd = %d, op = %u, cnt = %x, refs = %u\n", + __func__, me, me->fd, op, counts, kref_read(&me->refs)); + + if (ObjectOp_isLocal(op)) { + switch (method) { + case Object_OP_retain: + kref_get(&me->refs); + return OBJECT_OK; + case Object_OP_release: + kref_put(&me->refs, tzobject_delete); + return OBJECT_OK; + } + return OBJECT_ERROR_REMOTE; + } + + argptr = kcalloc(OBJECT_COUNTS_TOTAL(counts), + sizeof(union smcinvoke_arg), GFP_KERNEL); + if (argptr == NULL) + return OBJECT_ERROR_KMEM; + + ret = marshalIn(&req, argptr, op, args, counts); + if (ret) + goto exit; + + ret = process_invoke_request_from_kernel_client(me->fd, 
&req); + if (ret) { + pr_err("INVOKE failed with ret = %d, result = %d\n" + "obj.context = %p, fd = %d, op = %d, counts = 0x%x\n", + ret, req.result, me, me->fd, op, counts); + FOR_ARGS(i, counts, OO) { + struct smcinvoke_obj obj = argptr[i].o; + + if (obj.fd >= 0) { + pr_err("Close OO[%zu].fd = %d\n", i, obj.fd); + close_fd(obj.fd); + } + } + ret = OBJECT_ERROR_KMEM; + goto exit; + } + + if (!req.result) + ret = marshalOut(&req, argptr, args, counts, me); +exit: + kfree(argptr); + return ret | req.result; +} + +static int get_root_obj(struct Object *rootObj) +{ + int ret = 0; + int root_fd = -1; + + ret = get_root_fd(&root_fd); + if (ret) { + pr_err("Failed to get root fd, ret = %d\n"); + return ret; + } + *rootObj = tzobject_new(root_fd); + if (Object_isNull(*rootObj)) { + close_fd(root_fd); + ret = -ENOMEM; + } + return ret; +} + +/* + * Get a client environment using CBOR encoded credentials + * with UID of SYSTEM_UID (1000) + */ +static int32_t get_client_env_object(struct Object *clientEnvObj) +{ + int32_t ret = OBJECT_ERROR; + struct Object rootObj = Object_NULL; + /* Hardcode self cred buffer in CBOR encoded format. + * CBOR encoded credentials is created using following parameters, + * #define ATTR_UID 1 + * #define ATTR_PKG_NAME 3 + * #define SYSTEM_UID 1000 + * static const uint8_t bufString[] = {"UefiSmcInvoke"}; + */ + uint8_t encodedBuf[] = {0xA2, 0x01, 0x19, 0x03, 0xE8, 0x03, 0x6E, 0x55, + 0x65, 0x66, 0x69, 0x53, 0x6D, 0x63, 0x49, 0x6E, + 0x76, 0x6F, 0x6B, 0x65, 0x0}; + + /* get rootObj */ + ret = get_root_obj(&rootObj); + if (ret) { + pr_err("Failed to create rootobj\n"); + return ret; + } + + /* get client env */ + ret = IClientEnv_registerLegacy(rootObj, encodedBuf, + sizeof(encodedBuf), clientEnvObj); + if (ret) + pr_err("Failed to get ClientEnvObject, ret = %d\n", ret); + Object_release(rootObj); + return ret; +} + +static int load_app(struct qseecom_compat_context *cxt, const char *app_name) +{ + size_t fw_size = 0; + u8 *imgbuf_va = NULL; + int ret = 0; + char dist_name[MAX_APP_NAME_SIZE] = {0}; + size_t dist_name_len = 0; + struct qtee_shm shm = {0}; + + if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) { + pr_err("The app_name (%s) with length %zu is not valid\n", + app_name, strnlen(app_name, MAX_APP_NAME_SIZE)); + return -EINVAL; + } + + ret = IQSEEComCompatAppLoader_lookupTA(cxt->app_loader, + app_name, strlen(app_name), &cxt->app_controller); + if (!ret) { + pr_info("app %s exists\n", app_name); + return ret; + } + + imgbuf_va = firmware_request_from_smcinvoke(app_name, &fw_size, &shm); + if (imgbuf_va == NULL) { + pr_err("Failed on firmware_request_from_smcinvoke\n"); + return -EINVAL; + } + + ret = IQSEEComCompatAppLoader_loadFromBuffer( + cxt->app_loader, imgbuf_va, fw_size, + app_name, strlen(app_name), + dist_name, MAX_APP_NAME_SIZE, &dist_name_len, + &cxt->app_controller); + if (ret) { + pr_err("loadFromBuffer failed for app %s, ret = %d\n", + app_name, ret); + goto exit_release_shm; + } + cxt->app_arch = *(uint8_t *)(imgbuf_va + EI_CLASS); + + pr_info("%s %d, loaded app %s, dist_name %s, dist_name_len %zu\n", + __func__, __LINE__, app_name, dist_name, dist_name_len); + +exit_release_shm: + qtee_shmbridge_free_shm(&shm); + return ret; +} + +int qseecom_start_app(struct qseecom_handle **handle, + char *app_name, uint32_t size) +{ + int ret = 0; + struct qseecom_compat_context *cxt = NULL; + + pr_warn("%s, start app %s, size %zu\n", + __func__, app_name, size); + if (app_name == NULL || handle == NULL) { + pr_err("app_name is null or invalid 
handle\n"); + return -EINVAL; + } + /* allocate qseecom_compat_context */ + cxt = kzalloc(sizeof(struct qseecom_compat_context), GFP_KERNEL); + if (!cxt) + return -ENOMEM; + + /* get client env */ + ret = get_client_env_object(&cxt->client_env); + if (ret) { + pr_err("failed to get clientEnv when loading app %s, ret %d\n", + app_name, ret); + ret = -EINVAL; + goto exit_free_cxt; + } + /* get apploader with CQSEEComCompatAppLoader_UID */ + ret = IClientEnv_open(cxt->client_env, CQSEEComCompatAppLoader_UID, + &cxt->app_loader); + if (ret) { + pr_err("failed to get apploader when loading app %s, ret %d\n", + app_name, ret); + ret = -EINVAL; + goto exit_release_clientenv; + } + + /* load app*/ + ret = load_app(cxt, app_name); + if (ret) { + pr_err("failed to load app %s, ret = %d\n", + app_name, ret); + ret = -EINVAL; + goto exit_release_apploader; + } + + /* Get the physical address of the req/resp buffer */ + ret = qtee_shmbridge_allocate_shm(size, &cxt->shm); + + if (ret) { + pr_err("qtee_shmbridge_allocate_shm failed, ret :%d\n", ret); + ret = -EINVAL; + goto exit_release_appcontroller; + } + cxt->sbuf = cxt->shm.vaddr; + cxt->sbuf_len = size; + *handle = (struct qseecom_handle *)cxt; + + return ret; + +exit_release_appcontroller: + Object_release(cxt->app_controller); +exit_release_apploader: + Object_release(cxt->app_loader); +exit_release_clientenv: + Object_release(cxt->client_env); +exit_free_cxt: + kfree(cxt); + + return ret; +} +EXPORT_SYMBOL(qseecom_start_app); + +int qseecom_shutdown_app(struct qseecom_handle **handle) +{ + struct qseecom_compat_context *cxt = + (struct qseecom_compat_context *)(*handle); + + if ((handle == NULL) || (*handle == NULL)) { + pr_err("Handle is NULL\n"); + return -EINVAL; + } + + qtee_shmbridge_free_shm(&cxt->shm); + Object_release(cxt->app_controller); + Object_release(cxt->app_loader); + Object_release(cxt->client_env); + kfree(cxt); + *handle = NULL; + return 0; +} +EXPORT_SYMBOL(qseecom_shutdown_app); + +int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, + uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len) +{ + struct qseecom_compat_context *cxt = + (struct qseecom_compat_context *)handle; + size_t out_len = 0; + + pr_debug("%s, sbuf_len %u, rbuf_len %u\n", + __func__, sbuf_len, rbuf_len); + + if (!handle || !send_buf || !resp_buf || !sbuf_len || !rbuf_len) { + pr_err("One of params is invalid. %s, handle %x, send_buf %x,resp_buf %x,sbuf_len %u, rbuf_len %u\n", + __func__, handle, send_buf, resp_buf, sbuf_len, rbuf_len); + return -EINVAL; + } + return IQSEEComCompat_sendRequest(cxt->app_controller, + send_buf, sbuf_len, + resp_buf, rbuf_len, + send_buf, sbuf_len, &out_len, + resp_buf, rbuf_len, &out_len, + NULL, 0, /* embedded offset array */ + (cxt->app_arch == ELFCLASS64), + Object_NULL, Object_NULL, + Object_NULL, Object_NULL); +} +EXPORT_SYMBOL(qseecom_send_command); +#endif diff --git a/smcinvoke/smcinvoke_object.h b/smcinvoke/smcinvoke_object.h new file mode 100644 index 0000000000..620922bfb0 --- /dev/null +++ b/smcinvoke/smcinvoke_object.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + */ +#ifndef __SMCINVOKE_OBJECT_H +#define __SMCINVOKE_OBJECT_H + +#include +#include +#include + +/* + * Method bits are not modified by transport layers. These describe the + * method (member function) being requested by the client. 
+ */ + +#define OBJECT_OP_METHOD_MASK (0x0000FFFFu) +#define OBJECT_OP_METHODID(op) ((op) & OBJECT_OP_METHOD_MASK) +#define OBJECT_OP_RELEASE (OBJECT_OP_METHOD_MASK - 0) +#define OBJECT_OP_RETAIN (OBJECT_OP_METHOD_MASK - 1) +#define OBJECT_OP_MAP_REGION 0 +#define OBJECT_OP_YIELD 1 + +#define OBJECT_COUNTS_MAX_BI 0xF +#define OBJECT_COUNTS_MAX_BO 0xF +#define OBJECT_COUNTS_MAX_OI 0xF +#define OBJECT_COUNTS_MAX_OO 0xF + +/* unpack counts */ + +#define OBJECT_COUNTS_NUM_BI(k) ((size_t) (((k) >> 0) & OBJECT_COUNTS_MAX_BI)) +#define OBJECT_COUNTS_NUM_BO(k) ((size_t) (((k) >> 4) & OBJECT_COUNTS_MAX_BO)) +#define OBJECT_COUNTS_NUM_OI(k) ((size_t) (((k) >> 8) & OBJECT_COUNTS_MAX_OI)) +#define OBJECT_COUNTS_NUM_OO(k) ((size_t) (((k) >> 12) & OBJECT_COUNTS_MAX_OO)) +#define OBJECT_COUNTS_NUM_buffers(k) \ + (OBJECT_COUNTS_NUM_BI(k) + OBJECT_COUNTS_NUM_BO(k)) + +#define OBJECT_COUNTS_NUM_objects(k) \ + (OBJECT_COUNTS_NUM_OI(k) + OBJECT_COUNTS_NUM_OO(k)) + +/* Indices into args[] */ + +#define OBJECT_COUNTS_INDEX_BI(k) 0 +#define OBJECT_COUNTS_INDEX_BO(k) \ + (OBJECT_COUNTS_INDEX_BI(k) + OBJECT_COUNTS_NUM_BI(k)) +#define OBJECT_COUNTS_INDEX_OI(k) \ + (OBJECT_COUNTS_INDEX_BO(k) + OBJECT_COUNTS_NUM_BO(k)) +#define OBJECT_COUNTS_INDEX_OO(k) \ + (OBJECT_COUNTS_INDEX_OI(k) + OBJECT_COUNTS_NUM_OI(k)) +#define OBJECT_COUNTS_TOTAL(k) \ + (OBJECT_COUNTS_INDEX_OO(k) + OBJECT_COUNTS_NUM_OO(k)) + +#define OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \ + ((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \ + ((in_objs) << 8) | ((out_objs) << 12))) + +#define OBJECT_COUNTS_INDEX_buffers(k) OBJECT_COUNTS_INDEX_BI(k) + +/* Object_invoke return codes */ + +#define OBJECT_isOK(err) ((err) == 0) +#define OBJECT_isERROR(err) ((err) != 0) + +/* Generic error codes */ + +#define OBJECT_OK 0 /* non-specific success code */ +#define OBJECT_ERROR 1 /* non-specific error */ +#define OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */ +#define OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */ +#define OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */ + +#define OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */ + +/* Transport layer error codes */ + +#define OBJECT_ERROR_DEFUNCT -90 /* object no longer exists */ +#define OBJECT_ERROR_ABORT -91 /* calling thread must exit */ +#define OBJECT_ERROR_BADOBJ -92 /* invalid object context */ +#define OBJECT_ERROR_NOSLOTS -93 /* caller's object table full */ +#define OBJECT_ERROR_MAXARGS -94 /* too many args */ +#define OBJECT_ERROR_MAXDATA -95 /* buffers too large */ +#define OBJECT_ERROR_UNAVAIL -96 /* the request could not be processed */ +#define OBJECT_ERROR_KMEM -97 /* kernel out of memory */ +#define OBJECT_ERROR_REMOTE -98 /* local method sent to remote object */ +#define OBJECT_ERROR_BUSY -99 /* Object is busy */ +#define Object_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. 
*/ + +#define FOR_ARGS(ndxvar, counts, section) \ + for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \ + ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \ + + OBJECT_COUNTS_NUM_##section(counts)); \ + ++ndxvar) + +/* ObjectOp */ + +#define ObjectOp_METHOD_MASK ((uint32_t) 0x0000FFFFu) +#define ObjectOp_methodID(op) ((op) & ObjectOp_METHOD_MASK) + +#define ObjectOp_LOCAL ((uint32_t) 0x00008000U) + +#define ObjectOp_isLocal(op) (((op) & ObjectOp_LOCAL) != 0) + + +#define Object_OP_release (ObjectOp_METHOD_MASK - 0) +#define Object_OP_retain (ObjectOp_METHOD_MASK - 1) + +/* Object */ + +#define ObjectCounts_pack(nBuffersIn, nBuffersOut, nObjectsIn, nObjectsOut) \ + ((uint32_t) ((nBuffersIn) | \ + ((nBuffersOut) << 4) | \ + ((nObjectsIn) << 8) | \ + ((nObjectsOut) << 12))) + +union ObjectArg; + +typedef int32_t (*ObjectInvoke)(void *h, + uint32_t op, + union ObjectArg *args, + uint32_t counts); + +struct Object { + ObjectInvoke invoke; + void *context; +}; + +struct ObjectBuf { + void *ptr; + size_t size; +}; + +struct ObjectBufIn { + const void *ptr; + size_t size; +}; + +union ObjectArg { + struct ObjectBuf b; + struct ObjectBufIn bi; + struct Object o; +}; + +static inline int32_t Object_invoke(struct Object o, uint32_t op, + union ObjectArg *args, uint32_t k) +{ + return o.invoke(o.context, op, args, k); +} + +#define Object_NULL ((struct Object){NULL, NULL}) + + +#define OBJECT_NOT_RETAINED + +#define OBJECT_CONSUMED + +static inline int32_t Object_release(OBJECT_CONSUMED struct Object o) +{ + return Object_invoke((o), Object_OP_release, 0, 0); +} +static inline int32_t Object_retain(struct Object o) +{ + return Object_invoke((o), Object_OP_retain, 0, 0); +} + +#define Object_isNull(o) ((o).invoke == NULL) + +#define Object_RELEASE_IF(o) \ + do { \ + struct Object o_ = (o); \ + if (!Object_isNull(o_)) \ + (void) Object_release(o_); \ + } while (0) + +static inline void Object_replace(struct Object *loc, struct Object objNew) +{ + if (!Object_isNull(*loc)) + Object_release(*loc); + + if (!Object_isNull(objNew)) + Object_retain(objNew); + *loc = objNew; +} + +#define Object_ASSIGN_NULL(loc) Object_replace(&(loc), Object_NULL) + +int smcinvoke_release_from_kernel_client(int fd); + +int get_root_fd(int *root_fd); + +int process_invoke_request_from_kernel_client( + int fd, struct smcinvoke_cmd_req *req); + +char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm); + +#endif /* __SMCINVOKE_OBJECT_H */ diff --git a/smcinvoke/trace_smcinvoke.h b/smcinvoke/trace_smcinvoke.h new file mode 100644 index 0000000000..97a66069c9 --- /dev/null +++ b/smcinvoke/trace_smcinvoke.h @@ -0,0 +1,498 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM smcinvoke + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_smcinvoke + +#if !defined(_TRACE_SMCINVOKE) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SMCINVOKE_H +#include +#include +#include "smcinvoke.h" + +TRACE_EVENT(put_pending_cbobj_locked, + TP_PROTO(uint16_t srvr_id, uint16_t obj_id), + TP_ARGS(srvr_id, obj_id), + TP_STRUCT__entry( + __field(uint16_t, srvr_id) + __field(uint16_t, obj_id) + ), + TP_fast_assign( + __entry->srvr_id = srvr_id; + __entry->obj_id = obj_id; + ), + TP_printk("srvr_id=0x%x obj_id=0x%x", + __entry->srvr_id, __entry->obj_id) +); + +TRACE_EVENT(release_mem_obj_locked, + TP_PROTO(uint32_t tzhandle, size_t buf_len), + TP_ARGS(tzhandle, buf_len), + TP_STRUCT__entry( + __field(uint32_t, tzhandle) + __field(size_t, buf_len) + ), + TP_fast_assign( + __entry->tzhandle = tzhandle; + __entry->buf_len = buf_len; + ), + TP_printk("tzhandle=0x%08x, buf_len=%zu", + __entry->tzhandle, __entry->buf_len) +); + +TRACE_EVENT(invoke_cmd_handler, + TP_PROTO(int cmd, uint64_t response_type, int32_t result, int ret), + TP_ARGS(cmd, response_type, result, ret), + TP_STRUCT__entry( + __field(int, cmd) + __field(uint64_t, response_type) + __field(int32_t, result) + __field(int, ret) + ), + TP_fast_assign( + __entry->response_type = response_type; + __entry->result = result; + __entry->ret = ret; + __entry->cmd = cmd; + ), + TP_printk("cmd=0x%x (%d), response_type=%ld, result=0x%x (%d), ret=%d", + __entry->cmd, __entry->cmd, __entry->response_type, + __entry->result, __entry->result, __entry->ret) +); + +TRACE_EVENT(process_tzcb_req_handle, + TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts), + TP_ARGS(tzhandle, op, counts), + TP_STRUCT__entry( + __field(uint32_t, tzhandle) + __field(uint32_t, op) + __field(uint32_t, counts) + ), + TP_fast_assign( + __entry->tzhandle = tzhandle; + __entry->op = op; + __entry->counts = counts; + ), + TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x", + __entry->tzhandle, __entry->op, __entry->counts) +); + +TRACE_EVENT(process_tzcb_req_wait, + TP_PROTO(uint32_t tzhandle, int cbobj_retries, uint32_t txn_id, pid_t pid, pid_t tgid, + uint16_t server_state, uint16_t server_id, unsigned int cb_reqs_inflight), + TP_ARGS(tzhandle, cbobj_retries, txn_id, pid, tgid, server_state, server_id, + cb_reqs_inflight), + TP_STRUCT__entry( + __field(uint32_t, tzhandle) + __field(int, cbobj_retries) + __field(uint32_t, txn_id) + __field(pid_t, pid) + __field(pid_t, tgid) + __field(uint16_t, server_state) + __field(uint16_t, server_id) + __field(unsigned int, cb_reqs_inflight) + ), + TP_fast_assign( + __entry->tzhandle = tzhandle; + __entry->cbobj_retries = cbobj_retries; + __entry->txn_id = txn_id; + __entry->pid = pid; + __entry->tgid = tgid; + __entry->server_state = server_state; + __entry->server_id = server_id; + __entry->cb_reqs_inflight = cb_reqs_inflight; + ), + TP_printk("tzhandle=0x%08x, retries=%d, txn_id=%d, pid %x,tid %x, srvr state=%d, server_id=0x%x, cb_reqs_inflight=%d", + __entry->tzhandle, __entry->cbobj_retries, __entry->txn_id, + __entry->pid, __entry->tgid, __entry->server_state, + __entry->server_id, __entry->cb_reqs_inflight) +); + +TRACE_EVENT(process_tzcb_req_result, + TP_PROTO(int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts, + unsigned int cb_reqs_inflight), + TP_ARGS(result, tzhandle, op, counts, cb_reqs_inflight), + TP_STRUCT__entry( + __field(int32_t, result) + __field(uint32_t, tzhandle) + __field(uint32_t, op) + __field(uint32_t, counts) + 
__field(unsigned int, cb_reqs_inflight) + ), + TP_fast_assign( + __entry->result = result; + __entry->tzhandle = tzhandle; + __entry->op = op; + __entry->counts = counts; + __entry->cb_reqs_inflight = cb_reqs_inflight; + ), + TP_printk("result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x, cb_reqs_inflight=%d", + __entry->result, __entry->tzhandle, __entry->op, __entry->counts, + __entry->cb_reqs_inflight) +); + +TRACE_EVENT(marshal_out_invoke_req, + TP_PROTO(int i, uint32_t tzhandle, uint16_t server, uint32_t fd), + TP_ARGS(i, tzhandle, server, fd), + TP_STRUCT__entry( + __field(int, i) + __field(uint32_t, tzhandle) + __field(uint16_t, server) + __field(uint32_t, fd) + ), + TP_fast_assign( + __entry->i = i; + __entry->tzhandle = tzhandle; + __entry->server = server; + __entry->fd = fd; + ), + TP_printk("OO[%d]: tzhandle=0x%x server=0x%x fd=0x%x", + __entry->i, __entry->tzhandle, __entry->server, __entry->fd) +); + +TRACE_EVENT(prepare_send_scm_msg, + TP_PROTO(uint64_t response_type, int32_t result), + TP_ARGS(response_type, result), + TP_STRUCT__entry( + __field(uint64_t, response_type) + __field(int32_t, result) + ), + TP_fast_assign( + __entry->response_type = response_type; + __entry->result = result; + ), + TP_printk("response_type=0x%lx (%ld), result=0x%x (%d)", + __entry->response_type, __entry->response_type, + __entry->result, __entry->result) +); + +TRACE_EVENT(marshal_in_invoke_req, + TP_PROTO(int i, int64_t fd, int32_t cb_server_fd, uint32_t tzhandle), + TP_ARGS(i, fd, cb_server_fd, tzhandle), + TP_STRUCT__entry( + __field(int, i) + __field(int64_t, fd) + __field(int32_t, cb_server_fd) + __field(uint32_t, tzhandle) + ), + TP_fast_assign( + __entry->i = i; + __entry->fd = fd; + __entry->cb_server_fd = cb_server_fd; + __entry->tzhandle = tzhandle; + ), + TP_printk("OI[%d]: fd=0x%x cb_server_fd=0x%x tzhandle=0x%x", + __entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle) +); + +TRACE_EVENT(marshal_in_tzcb_req_handle, + TP_PROTO(uint32_t tzhandle, int srvr_id, int32_t cbobj_id, uint32_t op, uint32_t counts), + TP_ARGS(tzhandle, srvr_id, cbobj_id, op, counts), + TP_STRUCT__entry( + __field(uint32_t, tzhandle) + __field(int, srvr_id) + __field(int32_t, cbobj_id) + __field(uint32_t, op) + __field(uint32_t, counts) + ), + TP_fast_assign( + __entry->tzhandle = tzhandle; + __entry->srvr_id = srvr_id; + __entry->cbobj_id = cbobj_id; + __entry->op = op; + __entry->counts = counts; + ), + TP_printk("tzhandle=0x%x srvr_id=0x%x cbobj_id=0x%08x op=0x%02x counts=0x%04x", + __entry->tzhandle, __entry->srvr_id, __entry->cbobj_id, + __entry->op, __entry->counts) +); + +TRACE_EVENT(marshal_in_tzcb_req_fd, + TP_PROTO(int i, uint32_t tzhandle, int srvr_id, int32_t fd), + TP_ARGS(i, tzhandle, srvr_id, fd), + TP_STRUCT__entry( + __field(int, i) + __field(uint32_t, tzhandle) + __field(int, srvr_id) + __field(int32_t, fd) + ), + TP_fast_assign( + __entry->i = i; + __entry->tzhandle = tzhandle; + __entry->srvr_id = srvr_id; + __entry->fd = fd; + ), + TP_printk("OI[%d]: tzhandle=0x%x srvr_id=0x%x fd=0x%x", + __entry->i, __entry->tzhandle, __entry->srvr_id, __entry->fd) +); + +TRACE_EVENT(marshal_out_tzcb_req, + TP_PROTO(uint32_t i, int32_t fd, int32_t cb_server_fd, uint32_t tzhandle), + TP_ARGS(i, fd, cb_server_fd, tzhandle), + TP_STRUCT__entry( + __field(int, i) + __field(int32_t, fd) + __field(int32_t, cb_server_fd) + __field(uint32_t, tzhandle) + ), + TP_fast_assign( + __entry->i = i; + __entry->fd = fd; + __entry->cb_server_fd = cb_server_fd; + __entry->tzhandle = tzhandle; + ), + 
TP_printk("OO[%d]: fd=0x%x cb_server_fd=0x%x tzhandle=0x%x", + __entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle) +); + +TRACE_EVENT(process_invoke_req_tzhandle, + TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts), + TP_ARGS(tzhandle, op, counts), + TP_STRUCT__entry( + __field(uint32_t, tzhandle) + __field(uint32_t, op) + __field(uint32_t, counts) + ), + TP_fast_assign( + __entry->tzhandle = tzhandle; + __entry->op = op; + __entry->counts = counts; + ), + TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x", + __entry->tzhandle, __entry->op, __entry->counts) +); + +TRACE_EVENT(process_invoke_req_result, + TP_PROTO(int ret, int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts), + TP_ARGS(ret, result, tzhandle, op, counts), + TP_STRUCT__entry( + __field(int, ret) + __field(int32_t, result) + __field(uint32_t, tzhandle) + __field(uint32_t, op) + __field(uint32_t, counts) + ), + TP_fast_assign( + __entry->ret = ret; + __entry->result = result; + __entry->tzhandle = tzhandle; + __entry->op = op; + __entry->counts = counts; + ), + TP_printk("ret=%d result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x", + __entry->ret, __entry->result, __entry->tzhandle, + __entry->op, __entry->counts) +); + +TRACE_EVENT(process_log_info, + TP_PROTO(char *buf, uint32_t context_type, uint32_t tzhandle), + TP_ARGS(buf, context_type, tzhandle), + TP_STRUCT__entry( + __string(str, buf) + __field(uint32_t, context_type) + __field(uint32_t, tzhandle) + ), + TP_fast_assign( + __assign_str(str, buf); + __entry->context_type = context_type; + __entry->tzhandle = tzhandle; + ), + TP_printk("%s context_type=%d tzhandle=0x%08x", + __get_str(str), + __entry->context_type, __entry->tzhandle) +); + +TRACE_EVENT_CONDITION(smcinvoke_ioctl, + TP_PROTO(unsigned int cmd, long ret), + TP_ARGS(cmd, ret), + TP_CONDITION(ret), + TP_STRUCT__entry( + __field(unsigned int, cmd) + __field(long, ret) + ), + TP_fast_assign( + __entry->cmd = cmd; + __entry->ret = ret; + ), + TP_printk("cmd=%s ret=%ld", + __print_symbolic(__entry->cmd, + {SMCINVOKE_IOCTL_INVOKE_REQ, "SMCINVOKE_IOCTL_INVOKE_REQ"}, + {SMCINVOKE_IOCTL_ACCEPT_REQ, "SMCINVOKE_IOCTL_ACCEPT_REQ"}, + {SMCINVOKE_IOCTL_SERVER_REQ, "SMCINVOKE_IOCTL_SERVER_REQ"}, + {SMCINVOKE_IOCTL_ACK_LOCAL_OBJ, "SMCINVOKE_IOCTL_ACK_LOCAL_OBJ"}, + {SMCINVOKE_IOCTL_LOG, "SMCINVOKE_IOCTL_LOG"} + ), __entry->ret) +); + +TRACE_EVENT(smcinvoke_create_bridge, + TP_PROTO(uint64_t shmbridge_handle, uint16_t mem_region_id), + TP_ARGS(shmbridge_handle, mem_region_id), + TP_STRUCT__entry( + __field(uint64_t, shmbridge_handle) + __field(uint16_t, mem_region_id) + ), + TP_fast_assign( + __entry->shmbridge_handle = shmbridge_handle; + __entry->mem_region_id = mem_region_id; + ), + TP_printk("created shm bridge handle %llu for mem_region_id %u", + __entry->shmbridge_handle, __entry->mem_region_id) +); + +TRACE_EVENT(status, + TP_PROTO(const char *func, const char *status), + TP_ARGS(func, status), + TP_STRUCT__entry( + __string(str, func) + __string(str2, status) + ), + TP_fast_assign( + __assign_str(str, func); + __assign_str(str2, status); + ), + TP_printk("%s status=%s", __get_str(str), __get_str(str2)) +); + +TRACE_EVENT(process_accept_req_has_response, + TP_PROTO(pid_t pid, pid_t tgid), + TP_ARGS(pid, tgid), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(pid_t, tgid) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->tgid = tgid; + ), + TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid) +); + +TRACE_EVENT(process_accept_req_ret, + TP_PROTO(pid_t pid, pid_t tgid, 
int ret), + TP_ARGS(pid, tgid, ret), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(pid_t, tgid) + __field(int, ret) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->tgid = tgid; + __entry->ret = ret; + ), + TP_printk("pid=0x%x tgid=0x%x ret=%d", __entry->pid, __entry->tgid, __entry->ret) +); + +TRACE_EVENT(process_accept_req_placed, + TP_PROTO(pid_t pid, pid_t tgid), + TP_ARGS(pid, tgid), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(pid_t, tgid) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->tgid = tgid; + ), + TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid) +); + +TRACE_EVENT(process_invoke_request_from_kernel_client, + TP_PROTO(int fd, struct file *filp, int f_count), + TP_ARGS(fd, filp, f_count), + TP_STRUCT__entry( + __field(int, fd) + __field(struct file*, filp) + __field(int, f_count) + ), + TP_fast_assign( + __entry->fd = fd; + __entry->filp = filp; + __entry->f_count = f_count; + ), + TP_printk("fd=%d, filp=%p, f_count=%d", + __entry->fd, + __entry->filp, + __entry->f_count) +); + +TRACE_EVENT(smcinvoke_release_filp, + TP_PROTO(struct files_struct *files, struct file *filp, + int f_count, uint32_t context_type), + TP_ARGS(files, filp, f_count, context_type), + TP_STRUCT__entry( + __field(struct files_struct*, files) + __field(struct file*, filp) + __field(int, f_count) + __field(uint32_t, context_type) + ), + TP_fast_assign( + __entry->files = files; + __entry->filp = filp; + __entry->f_count = f_count; + __entry->context_type = context_type; + ), + TP_printk("files=%p, filp=%p, f_count=%u, cxt_type=%d", + __entry->files, + __entry->filp, + __entry->f_count, + __entry->context_type) +); + +TRACE_EVENT(smcinvoke_release_from_kernel_client, + TP_PROTO(struct files_struct *files, struct file *filp, int f_count), + TP_ARGS(files, filp, f_count), + TP_STRUCT__entry( + __field(struct files_struct*, files) + __field(struct file*, filp) + __field(int, f_count) + ), + TP_fast_assign( + __entry->files = files; + __entry->filp = filp; + __entry->f_count = f_count; + ), + TP_printk("files=%p, filp=%p, f_count=%u", + __entry->files, + __entry->filp, + __entry->f_count) +); + +TRACE_EVENT(smcinvoke_release, + TP_PROTO(struct files_struct *files, struct file *filp, + int f_count, void *private_data), + TP_ARGS(files, filp, f_count, private_data), + TP_STRUCT__entry( + __field(struct files_struct*, files) + __field(struct file*, filp) + __field(int, f_count) + __field(void*, private_data) + ), + TP_fast_assign( + __entry->files = files; + __entry->filp = filp; + __entry->f_count = f_count; + __entry->private_data = private_data; + ), + TP_printk("files=%p, filp=%p, f_count=%d, private_data=%p", + __entry->files, + __entry->filp, + __entry->f_count, + __entry->private_data) +); + +#endif /* _TRACE_SMCINVOKE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace_smcinvoke + +/* This part must be outside protection */ +#include diff --git a/ssg_kernel_headers.py b/ssg_kernel_headers.py new file mode 100644 index 0000000000..2285c65cbb --- /dev/null +++ b/ssg_kernel_headers.py @@ -0,0 +1,96 @@ +# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 as published by +# the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program. If not, see . + +import argparse +import filecmp +import os +import re +import subprocess +import sys + +def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): + if not h.startswith(prefix): + print('error: expected prefix [%s] on header [%s]' % (prefix, h)) + return False + + out_h = os.path.join(gen_dir, h[len(prefix):]) + (out_h_dirname, out_h_basename) = os.path.split(out_h) + env = os.environ.copy() + env["LOC_UNIFDEF"] = unifdef + cmd = ["sh", headers_install, h, out_h] + + if verbose: + print('run_headers_install: cmd is %s' % cmd) + + result = subprocess.call(cmd, env=env) + + if result != 0: + print('error: run_headers_install: cmd %s failed %d' % (cmd, result)) + return False + return True + +def gen_audio_headers(verbose, gen_dir, headers_install, unifdef, audio_include_uapi): + error_count = 0 + for h in audio_include_uapi: + audio_uapi_include_prefix = os.path.join(h.split('/include/uapi/')[0], + 'include', + 'uapi', + 'audio') + os.sep + + if not run_headers_install( + verbose, gen_dir, headers_install, unifdef, + audio_uapi_include_prefix, h): error_count += 1 + return error_count + +def main(): + """Parse command line arguments and perform top level control.""" + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + # Arguments that apply to every invocation of this script. + parser.add_argument( + '--verbose', action='store_true', + help='Print output that describes the workings of this script.') + parser.add_argument( + '--header_arch', required=True, + help='The arch for which to generate headers.') + parser.add_argument( + '--gen_dir', required=True, + help='Where to place the generated files.') + parser.add_argument( + '--audio_include_uapi', required=True, nargs='*', + help='The list of techpack/*/include/uapi header files.') + parser.add_argument( + '--headers_install', required=True, + help='The headers_install tool to process input headers.') + parser.add_argument( + '--unifdef', + required=True, + help='The unifdef tool used by headers_install.') + + args = parser.parse_args() + + if args.verbose: + print('header_arch [%s]' % args.header_arch) + print('gen_dir [%s]' % args.gen_dir) + print('audio_include_uapi [%s]' % args.audio_include_uapi) + print('headers_install [%s]' % args.headers_install) + print('unifdef [%s]' % args.unifdef) + + return gen_audio_headers(args.verbose, args.gen_dir, + args.headers_install, args.unifdef, args.audio_include_uapi) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/tz_log/tz_log.c b/tz_log/tz_log.c new file mode 100644 index 0000000000..0e937ec9ae --- /dev/null +++ b/tz_log/tz_log.c @@ -0,0 +1,1689 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* QSEE_LOG_BUF_SIZE = 32K */ +#define QSEE_LOG_BUF_SIZE 0x8000 + +/* enlarged qsee log buf size is 128K by default */ +#define QSEE_LOG_BUF_SIZE_V2 0x20000 + +/* TZ Diagnostic Area legacy version number */ +#define TZBSP_DIAG_MAJOR_VERSION_LEGACY 2 + +/* TZ Diagnostic Area version number */ +#define TZBSP_FVER_MAJOR_MINOR_MASK 0x3FF /* 10 bits */ +#define TZBSP_FVER_MAJOR_SHIFT 22 +#define TZBSP_FVER_MINOR_SHIFT 12 +#define TZBSP_DIAG_MAJOR_VERSION_V9 9 +#define TZBSP_DIAG_MINOR_VERSION_V2 2 +#define TZBSP_DIAG_MINOR_VERSION_V21 3 +#define TZBSP_DIAG_MINOR_VERSION_V22 4 + +/* TZ Diag Feature Version Id */ +#define QCOM_SCM_FEAT_DIAG_ID 0x06 + +/* + * Preprocessor Definitions and Constants + */ +#define TZBSP_MAX_CPU_COUNT 0x08 +/* + * Number of VMID Tables + */ +#define TZBSP_DIAG_NUM_OF_VMID 16 +/* + * VMID Description length + */ +#define TZBSP_DIAG_VMID_DESC_LEN 7 +/* + * Number of Interrupts + */ +#define TZBSP_DIAG_INT_NUM 32 +/* + * Length of descriptive name associated with Interrupt + */ +#define TZBSP_MAX_INT_DESC 16 +/* + * TZ 3.X version info + */ +#define QSEE_VERSION_TZ_3_X 0x800000 +/* + * TZ 4.X version info + */ +#define QSEE_VERSION_TZ_4_X 0x1000000 + +#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256 +#define TZBSP_NONCE_LEN 12 +#define TZBSP_TAG_LEN 16 + +#define ENCRYPTED_TZ_LOG_ID 0 +#define ENCRYPTED_QSEE_LOG_ID 1 + +/* + * Directory for TZ DBG logs + */ +#define TZDBG_DIR_NAME "tzdbg" + +/* + * VMID Table + */ +struct tzdbg_vmid_t { + uint8_t vmid; /* Virtual Machine Identifier */ + uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN]; /* ASCII Text */ +}; +/* + * Boot Info Table + */ +struct tzdbg_boot_info_t { + uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */ + uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */ + uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */ + uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */ + uint32_t warm_jmp_addr; /* Last Warmboot Jump Address */ + uint32_t spare; /* Reserved for future use. */ +}; +/* + * Boot Info Table for 64-bit + */ +struct tzdbg_boot_info64_t { + uint32_t wb_entry_cnt; /* Warmboot entry CPU Counter */ + uint32_t wb_exit_cnt; /* Warmboot exit CPU Counter */ + uint32_t pc_entry_cnt; /* Power Collapse entry CPU Counter */ + uint32_t pc_exit_cnt; /* Power Collapse exit CPU counter */ + uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */ + uint32_t psci_exit_cnt; /* PSCI syscall exit CPU Counter */ + uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */ + uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */ +}; +/* + * Reset Info Table + */ +struct tzdbg_reset_info_t { + uint32_t reset_type; /* Reset Reason */ + uint32_t reset_cnt; /* Number of resets occurred/CPU */ +}; +/* + * Interrupt Info Table + */ +struct tzdbg_int_t { + /* + * Type of Interrupt/exception + */ + uint16_t int_info; + /* + * Availability of the slot + */ + uint8_t avail; + /* + * Reserved for future use + */ + uint8_t spare; + /* + * Interrupt # for IRQ and FIQ + */ + uint32_t int_num; + /* + * ASCII text describing type of interrupt e.g: + * Secure Timer, EBI XPU. This string is always null terminated, + * supporting at most TZBSP_MAX_INT_DESC characters. + * Any additional characters are truncated. 
+ */ + uint8_t int_desc[TZBSP_MAX_INT_DESC]; + uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */ +}; + +/* + * Interrupt Info Table used in tz version >=4.X + */ +struct tzdbg_int_t_tz40 { + uint16_t int_info; + uint8_t avail; + uint8_t spare; + uint32_t int_num; + uint8_t int_desc[TZBSP_MAX_INT_DESC]; + uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/ +}; + +/* warm boot reason for cores */ +struct tzbsp_diag_wakeup_info_t { + /* Wake source info : APCS_GICC_HPPIR */ + uint32_t HPPIR; + /* Wake source info : APCS_GICC_AHPPIR */ + uint32_t AHPPIR; +}; + +/* + * Log ring buffer position + */ +struct tzdbg_log_pos_t { + uint16_t wrap; + uint16_t offset; +}; + +struct tzdbg_log_pos_v2_t { + uint32_t wrap; + uint32_t offset; +}; + + /* + * Log ring buffer + */ +struct tzdbg_log_t { + struct tzdbg_log_pos_t log_pos; + /* open ended array to the end of the 4K IMEM buffer */ + uint8_t log_buf[]; +}; + +struct tzdbg_log_v2_t { + struct tzdbg_log_pos_v2_t log_pos; + /* open ended array to the end of the 4K IMEM buffer */ + uint8_t log_buf[]; +}; + +struct tzbsp_encr_info_for_log_chunk_t { + uint32_t size_to_encr; + uint8_t nonce[TZBSP_NONCE_LEN]; + uint8_t tag[TZBSP_TAG_LEN]; +}; + +/* + * Only `ENTIRE_LOG` will be used unless the + * "OEM_tz_num_of_diag_log_chunks_to_encr" devcfg field >= 2. + * If this is true, the diag log will be encrypted in two + * separate chunks: a smaller chunk containing only error + * fatal logs and a bigger "rest of the log" chunk. In this + * case, `ERR_FATAL_LOG_CHUNK` and `BIG_LOG_CHUNK` will be + * used instead of `ENTIRE_LOG`. + */ +enum tzbsp_encr_info_for_log_chunks_idx_t { + BIG_LOG_CHUNK = 0, + ENTIRE_LOG = 1, + ERR_FATAL_LOG_CHUNK = 1, + MAX_NUM_OF_CHUNKS, +}; + +struct tzbsp_encr_info_t { + uint32_t num_of_chunks; + struct tzbsp_encr_info_for_log_chunk_t chunks[MAX_NUM_OF_CHUNKS]; + uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE]; +}; + +/* + * Diagnostic Table + * Note: This is the reference data structure for tz diagnostic table + * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly + * copied into buffer from i/o memory. 
+ */ +struct tzdbg_t { + uint32_t magic_num; + uint32_t version; + /* + * Number of CPU's + */ + uint32_t cpu_count; + /* + * Offset of VMID Table + */ + uint32_t vmid_info_off; + /* + * Offset of Boot Table + */ + uint32_t boot_info_off; + /* + * Offset of Reset info Table + */ + uint32_t reset_info_off; + /* + * Offset of Interrupt info Table + */ + uint32_t int_info_off; + /* + * Ring Buffer Offset + */ + uint32_t ring_off; + /* + * Ring Buffer Length + */ + uint32_t ring_len; + + /* Offset for Wakeup info */ + uint32_t wakeup_info_off; + + union { + /* The elements in below structure have to be used for TZ where + * diag version = TZBSP_DIAG_MINOR_VERSION_V2 + */ + struct { + + /* + * VMID to EE Mapping + */ + struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID]; + /* + * Boot Info + */ + struct tzdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT]; + /* + * Reset Info + */ + struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT]; + uint32_t num_interrupts; + struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM]; + /* Wake up info */ + struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT]; + + uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE]; + + uint8_t nonce[TZBSP_NONCE_LEN]; + + uint8_t tag[TZBSP_TAG_LEN]; + }; + /* The elements in below structure have to be used for TZ where + * diag version = TZBSP_DIAG_MINOR_VERSION_V21 + */ + struct { + + uint32_t encr_info_for_log_off; + + /* + * VMID to EE Mapping + */ + struct tzdbg_vmid_t vmid_info_v2[TZBSP_DIAG_NUM_OF_VMID]; + /* + * Boot Info + */ + struct tzdbg_boot_info_t boot_info_v2[TZBSP_MAX_CPU_COUNT]; + /* + * Reset Info + */ + struct tzdbg_reset_info_t reset_info_v2[TZBSP_MAX_CPU_COUNT]; + uint32_t num_interrupts_v2; + struct tzdbg_int_t int_info_v2[TZBSP_DIAG_INT_NUM]; + + /* Wake up info */ + struct tzbsp_diag_wakeup_info_t wakeup_info_v2[TZBSP_MAX_CPU_COUNT]; + + struct tzbsp_encr_info_t encr_info_for_log; + }; + }; + + /* + * We need at least 2K for the ring buffer + */ + struct tzdbg_log_t ring_buffer; /* TZ Ring Buffer */ +}; + +struct hypdbg_log_pos_t { + uint16_t wrap; + uint16_t offset; +}; + +struct hypdbg_boot_info_t { + uint32_t warm_entry_cnt; + uint32_t warm_exit_cnt; +}; + +struct hypdbg_t { + /* Magic Number */ + uint32_t magic_num; + + /* Number of CPU's */ + uint32_t cpu_count; + + /* Ring Buffer Offset */ + uint32_t ring_off; + + /* Ring buffer position mgmt */ + struct hypdbg_log_pos_t log_pos; + uint32_t log_len; + + /* S2 fault numbers */ + uint32_t s2_fault_counter; + + /* Boot Info */ + struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT]; + + /* Ring buffer pointer */ + uint8_t log_buf_p[]; +}; + +/* + * Enumeration order for VMID's + */ +enum tzdbg_stats_type { + TZDBG_BOOT = 0, + TZDBG_RESET, + TZDBG_INTERRUPT, + TZDBG_VMID, + TZDBG_GENERAL, + TZDBG_LOG, + TZDBG_QSEE_LOG, + TZDBG_HYP_GENERAL, + TZDBG_HYP_LOG, + TZDBG_STATS_MAX +}; + +struct tzdbg_stat { + size_t display_len; + size_t display_offset; + char *name; + char *data; +}; + +struct tzdbg { + void __iomem *virt_iobase; + void __iomem *hyp_virt_iobase; + struct tzdbg_t *diag_buf; + struct hypdbg_t *hyp_diag_buf; + char *disp_buf; + int debug_tz[TZDBG_STATS_MAX]; + struct tzdbg_stat stat[TZDBG_STATS_MAX]; + uint32_t hyp_debug_rw_buf_size; + bool is_hyplog_enabled; + uint32_t tz_version; + bool is_encrypted_log_enabled; + bool is_enlarged_buf; + bool is_full_encrypted_tz_logs_supported; + bool is_full_encrypted_tz_logs_enabled; + int tz_diag_minor_version; + int tz_diag_major_version; +}; + +struct tzbsp_encr_log_t { + /* Magic Number */ + 
uint32_t magic_num; + /* version NUMBER */ + uint32_t version; + /* encrypted log size */ + uint32_t encr_log_buff_size; + /* Wrap value*/ + uint16_t wrap_count; + /* AES encryption key wrapped up with oem public key*/ + uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE]; + /* Nonce used for encryption*/ + uint8_t nonce[TZBSP_NONCE_LEN]; + /* Tag to be used for Validation */ + uint8_t tag[TZBSP_TAG_LEN]; + /* Encrypted log buffer */ + uint8_t log_buf[1]; +}; + +struct encrypted_log_info { + phys_addr_t paddr; + void *vaddr; + size_t size; + uint64_t shmb_handle; +}; + +static struct tzdbg tzdbg = { + .stat[TZDBG_BOOT].name = "boot", + .stat[TZDBG_RESET].name = "reset", + .stat[TZDBG_INTERRUPT].name = "interrupt", + .stat[TZDBG_VMID].name = "vmid", + .stat[TZDBG_GENERAL].name = "general", + .stat[TZDBG_LOG].name = "log", + .stat[TZDBG_QSEE_LOG].name = "qsee_log", + .stat[TZDBG_HYP_GENERAL].name = "hyp_general", + .stat[TZDBG_HYP_LOG].name = "hyp_log", +}; + +static struct tzdbg_log_t *g_qsee_log; +static struct tzdbg_log_v2_t *g_qsee_log_v2; +static dma_addr_t coh_pmem; +static uint32_t debug_rw_buf_size; +static uint32_t display_buf_size; +static uint32_t qseelog_buf_size; +static phys_addr_t disp_buf_paddr; + +static uint64_t qseelog_shmbridge_handle; +static struct encrypted_log_info enc_qseelog_info; +static struct encrypted_log_info enc_tzlog_info; + +/* + * Debugfs data structure and functions + */ + +static int _disp_tz_general_stats(void) +{ + int len = 0; + + len += scnprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1, + " Version : 0x%x\n" + " Magic Number : 0x%x\n" + " Number of CPU : %d\n", + tzdbg.diag_buf->version, + tzdbg.diag_buf->magic_num, + tzdbg.diag_buf->cpu_count); + tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_vmid_stats(void) +{ + int i, num_vmid; + int len = 0; + struct tzdbg_vmid_t *ptr; + + ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->vmid_info_off); + num_vmid = ((tzdbg.diag_buf->boot_info_off - + tzdbg.diag_buf->vmid_info_off)/ + (sizeof(struct tzdbg_vmid_t))); + + for (i = 0; i < num_vmid; i++) { + if (ptr->vmid < 0xFF) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " 0x%x %s\n", + (uint32_t)ptr->vmid, (uint8_t *)ptr->desc); + } + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: Cannot fit all info into the buffer\n", + __func__); + break; + } + ptr++; + } + + tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_boot_stats(void) +{ + int i; + int len = 0; + struct tzdbg_boot_info_t *ptr = NULL; + struct tzdbg_boot_info64_t *ptr_64 = NULL; + + pr_info("qsee_version = 0x%x\n", tzdbg.tz_version); + if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) { + ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *) + tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off); + } else { + ptr = (struct tzdbg_boot_info_t *)((unsigned char *) + tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off); + } + + for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) { + if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " CPU #: %d\n" + " Warmboot jump address : 0x%llx\n" + " Warmboot entry CPU counter : 0x%x\n" + " Warmboot exit CPU counter : 0x%x\n" + " Power Collapse entry CPU counter : 0x%x\n" + " Power Collapse exit CPU counter : 0x%x\n" + " Psci entry CPU counter : 0x%x\n" + " Psci exit CPU counter : 0x%x\n" + " Warmboot Jump Address Instruction : 0x%x\n", + i, 
(uint64_t)ptr_64->warm_jmp_addr, + ptr_64->wb_entry_cnt, + ptr_64->wb_exit_cnt, + ptr_64->pc_entry_cnt, + ptr_64->pc_exit_cnt, + ptr_64->psci_entry_cnt, + ptr_64->psci_exit_cnt, + ptr_64->warm_jmp_instr); + + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: Cannot fit all info into the buffer\n", + __func__); + break; + } + ptr_64++; + } else { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " CPU #: %d\n" + " Warmboot jump address : 0x%x\n" + " Warmboot entry CPU counter: 0x%x\n" + " Warmboot exit CPU counter : 0x%x\n" + " Power Collapse entry CPU counter: 0x%x\n" + " Power Collapse exit CPU counter : 0x%x\n", + i, ptr->warm_jmp_addr, + ptr->wb_entry_cnt, + ptr->wb_exit_cnt, + ptr->pc_entry_cnt, + ptr->pc_exit_cnt); + + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: Cannot fit all info into the buffer\n", + __func__); + break; + } + ptr++; + } + } + tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_reset_stats(void) +{ + int i; + int len = 0; + struct tzdbg_reset_info_t *ptr; + + ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->reset_info_off); + + for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " CPU #: %d\n" + " Reset Type (reason) : 0x%x\n" + " Reset counter : 0x%x\n", + i, ptr->reset_type, ptr->reset_cnt); + + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: Cannot fit all info into the buffer\n", + __func__); + break; + } + + ptr++; + } + tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_interrupt_stats(void) +{ + int i, j; + int len = 0; + int *num_int; + void *ptr; + struct tzdbg_int_t *tzdbg_ptr; + struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40; + + num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf + + (tzdbg.diag_buf->int_info_off - sizeof(uint32_t))); + ptr = ((unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->int_info_off); + + pr_info("qsee_version = 0x%x\n", tzdbg.tz_version); + + if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) { + tzdbg_ptr = ptr; + for (i = 0; i < (*num_int); i++) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " Interrupt Number : 0x%x\n" + " Type of Interrupt : 0x%x\n" + " Description of interrupt : %s\n", + tzdbg_ptr->int_num, + (uint32_t)tzdbg_ptr->int_info, + (uint8_t *)tzdbg_ptr->int_desc); + for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " int_count on CPU # %d : %u\n", + (uint32_t)j, + (uint32_t)tzdbg_ptr->int_count[j]); + } + len += scnprintf(tzdbg.disp_buf + len, + debug_rw_buf_size - 1, "\n"); + + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: Cannot fit all info into buf\n", + __func__); + break; + } + tzdbg_ptr++; + } + } else { + tzdbg_ptr_tz40 = ptr; + for (i = 0; i < (*num_int); i++) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " Interrupt Number : 0x%x\n" + " Type of Interrupt : 0x%x\n" + " Description of interrupt : %s\n", + tzdbg_ptr_tz40->int_num, + (uint32_t)tzdbg_ptr_tz40->int_info, + (uint8_t *)tzdbg_ptr_tz40->int_desc); + for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) { + len += scnprintf(tzdbg.disp_buf + len, + (debug_rw_buf_size - 1) - len, + " int_count on CPU # %d : %u\n", + (uint32_t)j, + (uint32_t)tzdbg_ptr_tz40->int_count[j]); + } + len += scnprintf(tzdbg.disp_buf + len, + debug_rw_buf_size - 1, "\n"); + + if (len > (debug_rw_buf_size - 1)) { + pr_warn("%s: 
Cannot fit all info into buf\n", + __func__); + break; + } + tzdbg_ptr_tz40++; + } + } + + tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_log_stats_legacy(void) +{ + int len = 0; + unsigned char *ptr; + + ptr = (unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->ring_off; + len += scnprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len, + "%s\n", ptr); + + tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf; + return len; +} + +static int _disp_log_stats(struct tzdbg_log_t *log, + struct tzdbg_log_pos_t *log_start, uint32_t log_len, + size_t count, uint32_t buf_idx) +{ + uint32_t wrap_start; + uint32_t wrap_end; + uint32_t wrap_cnt; + int max_len; + int len = 0; + int i = 0; + + wrap_start = log_start->wrap; + wrap_end = log->log_pos.wrap; + + /* Calculate difference in # of buffer wrap-arounds */ + if (wrap_end >= wrap_start) + wrap_cnt = wrap_end - wrap_start; + else { + /* wrap counter has wrapped around, invalidate start position */ + wrap_cnt = 2; + } + + if (wrap_cnt > 1) { + /* end position has wrapped around more than once, */ + /* current start no longer valid */ + log_start->wrap = log->log_pos.wrap - 1; + log_start->offset = (log->log_pos.offset + 1) % log_len; + } else if ((wrap_cnt == 1) && + (log->log_pos.offset > log_start->offset)) { + /* end position has overwritten start */ + log_start->offset = (log->log_pos.offset + 1) % log_len; + } + + pr_debug("diag_buf wrap = %u, offset = %u\n", + log->log_pos.wrap, log->log_pos.offset); + while (log_start->offset == log->log_pos.offset) { + /* + * No data in ring buffer, + * so we'll hang around until something happens + */ + unsigned long t = msleep_interruptible(50); + + if (t != 0) { + /* Some event woke us up, so let's quit */ + return 0; +} + + if (buf_idx == TZDBG_LOG) + memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase, + debug_rw_buf_size); + + } + + max_len = (count > debug_rw_buf_size) ? 
debug_rw_buf_size : count; + + pr_debug("diag_buf wrap = %u, offset = %u\n", + log->log_pos.wrap, log->log_pos.offset); + /* + * Read from ring buff while there is data and space in return buff + */ + while ((log_start->offset != log->log_pos.offset) && (len < max_len)) { + tzdbg.disp_buf[i++] = log->log_buf[log_start->offset]; + log_start->offset = (log_start->offset + 1) % log_len; + if (log_start->offset == 0) + ++log_start->wrap; + ++len; + } + + /* + * return buffer to caller + */ + tzdbg.stat[buf_idx].data = tzdbg.disp_buf; + return len; +} + +static int _disp_log_stats_v2(struct tzdbg_log_v2_t *log, + struct tzdbg_log_pos_v2_t *log_start, uint32_t log_len, + size_t count, uint32_t buf_idx) +{ + uint32_t wrap_start; + uint32_t wrap_end; + uint32_t wrap_cnt; + int max_len; + int len = 0; + int i = 0; + + wrap_start = log_start->wrap; + wrap_end = log->log_pos.wrap; + + /* Calculate difference in # of buffer wrap-arounds */ + if (wrap_end >= wrap_start) + wrap_cnt = wrap_end - wrap_start; + else { + /* wrap counter has wrapped around, invalidate start position */ + wrap_cnt = 2; +} + + if (wrap_cnt > 1) { + /* end position has wrapped around more than once, */ + /* current start no longer valid */ + log_start->wrap = log->log_pos.wrap - 1; + log_start->offset = (log->log_pos.offset + 1) % log_len; + } else if ((wrap_cnt == 1) && + (log->log_pos.offset > log_start->offset)) { + /* end position has overwritten start */ + log_start->offset = (log->log_pos.offset + 1) % log_len; + } + pr_debug("diag_buf wrap = %u, offset = %u\n", + log->log_pos.wrap, log->log_pos.offset); + + while (log_start->offset == log->log_pos.offset) { + /* + * No data in ring buffer, + * so we'll hang around until something happens + */ + unsigned long t = msleep_interruptible(50); + + if (t != 0) { + /* Some event woke us up, so let's quit */ + return 0; + } + + if (buf_idx == TZDBG_LOG) + memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase, + debug_rw_buf_size); + + } + + max_len = (count > debug_rw_buf_size) ? 
debug_rw_buf_size : count; + + pr_debug("diag_buf wrap = %u, offset = %u\n", + log->log_pos.wrap, log->log_pos.offset); + + /* + * Read from ring buff while there is data and space in return buff + */ + while ((log_start->offset != log->log_pos.offset) && (len < max_len)) { + tzdbg.disp_buf[i++] = log->log_buf[log_start->offset]; + log_start->offset = (log_start->offset + 1) % log_len; + if (log_start->offset == 0) + ++log_start->wrap; + ++len; + } + + /* + * return buffer to caller + */ + tzdbg.stat[buf_idx].data = tzdbg.disp_buf; + return len; +} + +static int __disp_hyp_log_stats(uint8_t *log, + struct hypdbg_log_pos_t *log_start, uint32_t log_len, + size_t count, uint32_t buf_idx) +{ + struct hypdbg_t *hyp = tzdbg.hyp_diag_buf; + unsigned long t = 0; + uint32_t wrap_start; + uint32_t wrap_end; + uint32_t wrap_cnt; + int max_len; + int len = 0; + int i = 0; + + wrap_start = log_start->wrap; + wrap_end = hyp->log_pos.wrap; + + /* Calculate difference in # of buffer wrap-arounds */ + if (wrap_end >= wrap_start) + wrap_cnt = wrap_end - wrap_start; + else { + /* wrap counter has wrapped around, invalidate start position */ + wrap_cnt = 2; + } + + if (wrap_cnt > 1) { + /* end position has wrapped around more than once, */ + /* current start no longer valid */ + log_start->wrap = hyp->log_pos.wrap - 1; + log_start->offset = (hyp->log_pos.offset + 1) % log_len; + } else if ((wrap_cnt == 1) && + (hyp->log_pos.offset > log_start->offset)) { + /* end position has overwritten start */ + log_start->offset = (hyp->log_pos.offset + 1) % log_len; + } + + while (log_start->offset == hyp->log_pos.offset) { + /* + * No data in ring buffer, + * so we'll hang around until something happens + */ + t = msleep_interruptible(50); + if (t != 0) { + /* Some event woke us up, so let's quit */ + return 0; + } + + /* TZDBG_HYP_LOG */ + memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase, + tzdbg.hyp_debug_rw_buf_size); + } + + max_len = (count > tzdbg.hyp_debug_rw_buf_size) ? 
+ tzdbg.hyp_debug_rw_buf_size : count; + + /* + * Read from ring buff while there is data and space in return buff + */ + while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) { + tzdbg.disp_buf[i++] = log[log_start->offset]; + log_start->offset = (log_start->offset + 1) % log_len; + if (log_start->offset == 0) + ++log_start->wrap; + ++len; + } + + /* + * return buffer to caller + */ + tzdbg.stat[buf_idx].data = tzdbg.disp_buf; + return len; +} + +static int print_text(char *intro_message, + unsigned char *text_addr, + unsigned int size, + char *buf, uint32_t buf_len) +{ + unsigned int i; + int len = 0; + + pr_debug("begin address %p, size %d\n", text_addr, size); + len += scnprintf(buf + len, buf_len - len, "%s\n", intro_message); + for (i = 0; i < size; i++) { + if (buf_len <= len + 6) { + pr_err("buffer not enough, buf_len %d, len %d\n", + buf_len, len); + return buf_len; + } + len += scnprintf(buf + len, buf_len - len, "%02hhx ", + text_addr[i]); + if ((i & 0x1f) == 0x1f) + len += scnprintf(buf + len, buf_len - len, "%c", '\n'); + } + len += scnprintf(buf + len, buf_len - len, "%c", '\n'); + return len; +} + +static int _disp_encrpted_log_stats(struct encrypted_log_info *enc_log_info, + enum tzdbg_stats_type type, uint32_t log_id) +{ + int ret = 0, len = 0; + struct tzbsp_encr_log_t *encr_log_head; + uint32_t size = 0; + + if ((!tzdbg.is_full_encrypted_tz_logs_supported) && + (tzdbg.is_full_encrypted_tz_logs_enabled)) + pr_info("TZ not supporting full encrypted log functionality\n"); + ret = qcom_scm_request_encrypted_log(enc_log_info->paddr, + enc_log_info->size, log_id, tzdbg.is_full_encrypted_tz_logs_supported, + tzdbg.is_full_encrypted_tz_logs_enabled); + if (ret) + return 0; + encr_log_head = (struct tzbsp_encr_log_t *)(enc_log_info->vaddr); + pr_debug("display_buf_size = %d, encr_log_buff_size = %d\n", + display_buf_size, encr_log_head->encr_log_buff_size); + size = encr_log_head->encr_log_buff_size; + + len += scnprintf(tzdbg.disp_buf + len, + (display_buf_size - 1) - len, + "\n-------- New Encrypted %s --------\n", + ((log_id == ENCRYPTED_QSEE_LOG_ID) ? 
+ "QSEE Log" : "TZ Dialog")); + + len += scnprintf(tzdbg.disp_buf + len, + (display_buf_size - 1) - len, + "\nMagic_Num :\n0x%x\n" + "\nVerion :\n%d\n" + "\nEncr_Log_Buff_Size :\n%d\n" + "\nWrap_Count :\n%d\n", + encr_log_head->magic_num, + encr_log_head->version, + encr_log_head->encr_log_buff_size, + encr_log_head->wrap_count); + + len += print_text("\nKey : ", encr_log_head->key, + TZBSP_AES_256_ENCRYPTED_KEY_SIZE, + tzdbg.disp_buf + len, display_buf_size); + len += print_text("\nNonce : ", encr_log_head->nonce, + TZBSP_NONCE_LEN, + tzdbg.disp_buf + len, display_buf_size - len); + len += print_text("\nTag : ", encr_log_head->tag, + TZBSP_TAG_LEN, + tzdbg.disp_buf + len, display_buf_size - len); + + if (len > display_buf_size - size) + pr_warn("Cannot fit all info into the buffer\n"); + + pr_debug("encrypted log size %d, disply buffer size %d, used len %d\n", + size, display_buf_size, len); + + len += print_text("\nLog : ", encr_log_head->log_buf, size, + tzdbg.disp_buf + len, display_buf_size - len); + memset(enc_log_info->vaddr, 0, enc_log_info->size); + tzdbg.stat[type].data = tzdbg.disp_buf; + return len; +} + +static int _disp_tz_log_stats(size_t count) +{ + static struct tzdbg_log_pos_v2_t log_start_v2 = {0}; + static struct tzdbg_log_pos_t log_start = {0}; + struct tzdbg_log_v2_t *log_v2_ptr; + struct tzdbg_log_t *log_ptr; + + log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->ring_off - + offsetof(struct tzdbg_log_t, log_buf)); + + log_v2_ptr = (struct tzdbg_log_v2_t *)((unsigned char *)tzdbg.diag_buf + + tzdbg.diag_buf->ring_off - + offsetof(struct tzdbg_log_v2_t, log_buf)); + + if (!tzdbg.is_enlarged_buf) + return _disp_log_stats(log_ptr, &log_start, + tzdbg.diag_buf->ring_len, count, TZDBG_LOG); + + return _disp_log_stats_v2(log_v2_ptr, &log_start_v2, + tzdbg.diag_buf->ring_len, count, TZDBG_LOG); +} + +static int _disp_hyp_log_stats(size_t count) +{ + static struct hypdbg_log_pos_t log_start = {0}; + uint8_t *log_ptr; + uint32_t log_len; + + log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf + + tzdbg.hyp_diag_buf->ring_off); + log_len = tzdbg.hyp_debug_rw_buf_size - tzdbg.hyp_diag_buf->ring_off; + + return __disp_hyp_log_stats(log_ptr, &log_start, + log_len, count, TZDBG_HYP_LOG); +} + +static int _disp_qsee_log_stats(size_t count) +{ + static struct tzdbg_log_pos_t log_start = {0}; + static struct tzdbg_log_pos_v2_t log_start_v2 = {0}; + + if (!tzdbg.is_enlarged_buf) + return _disp_log_stats(g_qsee_log, &log_start, + QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t), + count, TZDBG_QSEE_LOG); + + return _disp_log_stats_v2(g_qsee_log_v2, &log_start_v2, + QSEE_LOG_BUF_SIZE_V2 - sizeof(struct tzdbg_log_pos_v2_t), + count, TZDBG_QSEE_LOG); +} + +static int _disp_hyp_general_stats(size_t count) +{ + int len = 0; + int i; + struct hypdbg_boot_info_t *ptr = NULL; + + len += scnprintf((unsigned char *)tzdbg.disp_buf + len, + tzdbg.hyp_debug_rw_buf_size - 1, + " Magic Number : 0x%x\n" + " CPU Count : 0x%x\n" + " S2 Fault Counter: 0x%x\n", + tzdbg.hyp_diag_buf->magic_num, + tzdbg.hyp_diag_buf->cpu_count, + tzdbg.hyp_diag_buf->s2_fault_counter); + + ptr = tzdbg.hyp_diag_buf->boot_info; + for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) { + len += scnprintf((unsigned char *)tzdbg.disp_buf + len, + (tzdbg.hyp_debug_rw_buf_size - 1) - len, + " CPU #: %d\n" + " Warmboot entry CPU counter: 0x%x\n" + " Warmboot exit CPU counter : 0x%x\n", + i, ptr->warm_entry_cnt, ptr->warm_exit_cnt); + + if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) { + 
pr_warn("%s: Cannot fit all info into the buffer\n", + __func__); + break; + } + ptr++; + } + + tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf; + return len; +} + +static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf, + size_t count, loff_t *offp) +{ + int len = 0; + + if (tz_id == TZDBG_BOOT || tz_id == TZDBG_RESET || + tz_id == TZDBG_INTERRUPT || tz_id == TZDBG_GENERAL || + tz_id == TZDBG_VMID || tz_id == TZDBG_LOG) + memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase, + debug_rw_buf_size); + + if (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG) + memcpy_fromio((void *)tzdbg.hyp_diag_buf, + tzdbg.hyp_virt_iobase, + tzdbg.hyp_debug_rw_buf_size); + + switch (tz_id) { + case TZDBG_BOOT: + len = _disp_tz_boot_stats(); + break; + case TZDBG_RESET: + len = _disp_tz_reset_stats(); + break; + case TZDBG_INTERRUPT: + len = _disp_tz_interrupt_stats(); + break; + case TZDBG_GENERAL: + len = _disp_tz_general_stats(); + break; + case TZDBG_VMID: + len = _disp_tz_vmid_stats(); + break; + case TZDBG_LOG: + if (TZBSP_DIAG_MAJOR_VERSION_LEGACY < + (tzdbg.diag_buf->version >> 16)) { + len = _disp_tz_log_stats(count); + *offp = 0; + } else { + len = _disp_tz_log_stats_legacy(); + } + break; + case TZDBG_QSEE_LOG: + len = _disp_qsee_log_stats(count); + *offp = 0; + break; + case TZDBG_HYP_GENERAL: + len = _disp_hyp_general_stats(count); + break; + case TZDBG_HYP_LOG: + len = _disp_hyp_log_stats(count); + *offp = 0; + break; + default: + break; + } + + if (len > count) + len = count; + + return simple_read_from_buffer(buf, len, offp, + tzdbg.stat[tz_id].data, len); +} + +static ssize_t tzdbg_fs_read_encrypted(int tz_id, char __user *buf, + size_t count, loff_t *offp) +{ + int len = 0, ret = 0; + struct tzdbg_stat *stat = &(tzdbg.stat[tz_id]); + + pr_debug("%s: tz_id = %d\n", __func__, tz_id); + + if (tz_id >= TZDBG_STATS_MAX) { + pr_err("invalid encrypted log id %d\n", tz_id); + return ret; + } + + if (!stat->display_len) { + if (tz_id == TZDBG_QSEE_LOG) + stat->display_len = _disp_encrpted_log_stats( + &enc_qseelog_info, + tz_id, ENCRYPTED_QSEE_LOG_ID); + else + stat->display_len = _disp_encrpted_log_stats( + &enc_tzlog_info, + tz_id, ENCRYPTED_TZ_LOG_ID); + stat->display_offset = 0; + } + len = stat->display_len; + if (len > count) + len = count; + + *offp = 0; + ret = simple_read_from_buffer(buf, len, offp, + tzdbg.stat[tz_id].data + stat->display_offset, + count); + stat->display_offset += ret; + stat->display_len -= ret; + pr_debug("ret = %d, offset = %d\n", ret, (int)(*offp)); + pr_debug("display_len = %d, offset = %d\n", + stat->display_len, stat->display_offset); + return ret; +} + +static ssize_t tzdbg_fs_read(struct file *file, char __user *buf, + size_t count, loff_t *offp) +{ + struct seq_file *seq = file->private_data; + int tz_id = TZDBG_STATS_MAX; + + if (seq) + tz_id = *(int *)(seq->private); + else { + pr_err("%s: Seq data null unable to proceed\n", __func__); + return 0; + } + + if (!tzdbg.is_encrypted_log_enabled || + (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)) + return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp); + else + return tzdbg_fs_read_encrypted(tz_id, buf, count, offp); +} + +static int tzdbg_procfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, PDE_DATA(inode)); +} + +static int tzdbg_procfs_release(struct inode *inode, struct file *file) +{ + return single_release(inode, file); +} + +struct proc_ops tzdbg_fops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_read = tzdbg_fs_read, + 
.proc_open = tzdbg_procfs_open, + .proc_release = tzdbg_procfs_release, +}; + +/* + * Allocates log buffer from ION, registers the buffer at TZ + */ +static int tzdbg_register_qsee_log_buf(struct platform_device *pdev) +{ + int ret = 0; + void *buf = NULL; + uint32_t ns_vmids[] = {VMID_HLOS}; + uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE}; + uint32_t ns_vm_nums = 1; + + if (tzdbg.is_enlarged_buf) { + if (of_property_read_u32((&pdev->dev)->of_node, + "qseelog-buf-size-v2", &qseelog_buf_size)) { + pr_debug("Enlarged qseelog buf size isn't defined\n"); + qseelog_buf_size = QSEE_LOG_BUF_SIZE_V2; + } + } else { + qseelog_buf_size = QSEE_LOG_BUF_SIZE; + } + pr_debug("qseelog buf size is 0x%x\n", qseelog_buf_size); + + buf = dma_alloc_coherent(&pdev->dev, + qseelog_buf_size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + if (!tzdbg.is_encrypted_log_enabled) { + ret = qtee_shmbridge_register(coh_pmem, + qseelog_buf_size, ns_vmids, ns_vm_perms, ns_vm_nums, + PERM_READ | PERM_WRITE, + &qseelog_shmbridge_handle); + if (ret) { + pr_err("failed to create bridge for qsee_log buf\n"); + goto exit_free_mem; + } + } + + g_qsee_log = (struct tzdbg_log_t *)buf; + g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0; + + g_qsee_log_v2 = (struct tzdbg_log_v2_t *)buf; + g_qsee_log_v2->log_pos.wrap = g_qsee_log_v2->log_pos.offset = 0; + + ret = qcom_scm_register_qsee_log_buf(coh_pmem, qseelog_buf_size); + if (ret != QSEOS_RESULT_SUCCESS) { + pr_err( + "%s: scm_call to register log buf failed, resp result =%lld\n", + __func__, ret); + goto exit_dereg_bridge; + } + + return ret; + +exit_dereg_bridge: + if (!tzdbg.is_encrypted_log_enabled) + qtee_shmbridge_deregister(qseelog_shmbridge_handle); +exit_free_mem: + dma_free_coherent(&pdev->dev, qseelog_buf_size, + (void *)g_qsee_log, coh_pmem); + return ret; +} + +static void tzdbg_free_qsee_log_buf(struct platform_device *pdev) +{ + if (!tzdbg.is_encrypted_log_enabled) + qtee_shmbridge_deregister(qseelog_shmbridge_handle); + dma_free_coherent(&pdev->dev, qseelog_buf_size, + (void *)g_qsee_log, coh_pmem); +} + +static int tzdbg_allocate_encrypted_log_buf(struct platform_device *pdev) +{ + int ret = 0; + uint32_t ns_vmids[] = {VMID_HLOS}; + uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE}; + uint32_t ns_vm_nums = 1; + + if (!tzdbg.is_encrypted_log_enabled) + return 0; + + /* max encrypted qsee log buf zize (include header, and page align) */ + enc_qseelog_info.size = qseelog_buf_size + PAGE_SIZE; + + enc_qseelog_info.vaddr = dma_alloc_coherent(&pdev->dev, + enc_qseelog_info.size, + &enc_qseelog_info.paddr, GFP_KERNEL); + if (enc_qseelog_info.vaddr == NULL) + return -ENOMEM; + + ret = qtee_shmbridge_register(enc_qseelog_info.paddr, + enc_qseelog_info.size, ns_vmids, + ns_vm_perms, ns_vm_nums, + PERM_READ | PERM_WRITE, &enc_qseelog_info.shmb_handle); + if (ret) { + pr_err("failed to create encr_qsee_log bridge, ret %d\n", ret); + goto exit_free_qseelog; + } + pr_debug("Alloc memory for encr_qsee_log, size = %zu\n", + enc_qseelog_info.size); + + enc_tzlog_info.size = debug_rw_buf_size; + enc_tzlog_info.vaddr = dma_alloc_coherent(&pdev->dev, + enc_tzlog_info.size, + &enc_tzlog_info.paddr, GFP_KERNEL); + if (enc_tzlog_info.vaddr == NULL) + goto exit_unreg_qseelog; + + ret = qtee_shmbridge_register(enc_tzlog_info.paddr, + enc_tzlog_info.size, ns_vmids, ns_vm_perms, ns_vm_nums, + PERM_READ | PERM_WRITE, &enc_tzlog_info.shmb_handle); + if (ret) { + pr_err("failed to create encr_tz_log bridge, ret = %d\n", ret); + goto exit_free_tzlog; + } + 
pr_debug("Alloc memory for encr_tz_log, size %zu\n", + enc_qseelog_info.size); + + return 0; + +exit_free_tzlog: + dma_free_coherent(&pdev->dev, enc_tzlog_info.size, + enc_tzlog_info.vaddr, enc_tzlog_info.paddr); +exit_unreg_qseelog: + qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle); +exit_free_qseelog: + dma_free_coherent(&pdev->dev, enc_qseelog_info.size, + enc_qseelog_info.vaddr, enc_qseelog_info.paddr); + return -ENOMEM; +} + +static void tzdbg_free_encrypted_log_buf(struct platform_device *pdev) +{ + qtee_shmbridge_deregister(enc_tzlog_info.shmb_handle); + dma_free_coherent(&pdev->dev, enc_tzlog_info.size, + enc_tzlog_info.vaddr, enc_tzlog_info.paddr); + qtee_shmbridge_deregister(enc_qseelog_info.shmb_handle); + dma_free_coherent(&pdev->dev, enc_qseelog_info.size, + enc_qseelog_info.vaddr, enc_qseelog_info.paddr); +} + +static int tzdbg_fs_init(struct platform_device *pdev) +{ + int rc = 0; + int i; + struct proc_dir_entry *dent_dir; + struct proc_dir_entry *dent; + + dent_dir = proc_mkdir(TZDBG_DIR_NAME, NULL); + if (dent_dir == NULL) { + dev_err(&pdev->dev, "tzdbg proc_mkdir failed\n"); + return -ENOMEM; + } + + for (i = 0; i < TZDBG_STATS_MAX; i++) { + tzdbg.debug_tz[i] = i; + dent = proc_create_data(tzdbg.stat[i].name, + 0444, dent_dir, + &tzdbg_fops, &tzdbg.debug_tz[i]); + if (dent == NULL) { + dev_err(&pdev->dev, "TZ proc_create_data failed\n"); + rc = -ENOMEM; + goto err; + } + } + platform_set_drvdata(pdev, dent_dir); + return 0; +err: + remove_proc_entry(TZDBG_DIR_NAME, NULL); + + return rc; +} + +static void tzdbg_fs_exit(struct platform_device *pdev) +{ + struct proc_dir_entry *dent_dir; + dent_dir = platform_get_drvdata(pdev); + if (dent_dir) + remove_proc_entry(TZDBG_DIR_NAME, NULL); +} + +static int __update_hypdbg_base(struct platform_device *pdev, + void __iomem *virt_iobase) +{ + phys_addr_t hypdiag_phy_iobase; + uint32_t hyp_address_offset; + uint32_t hyp_size_offset; + struct hypdbg_t *hyp; + uint32_t *ptr = NULL; + + if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset", + &hyp_address_offset)) { + dev_err(&pdev->dev, "hyplog address offset is not defined\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset", + &hyp_size_offset)) { + dev_err(&pdev->dev, "hyplog size offset is not defined\n"); + return -EINVAL; + } + + hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset); + tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase + + hyp_size_offset); + + tzdbg.hyp_virt_iobase = devm_ioremap(&pdev->dev, + hypdiag_phy_iobase, + tzdbg.hyp_debug_rw_buf_size); + if (!tzdbg.hyp_virt_iobase) { + dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n", + &hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size); + return -ENXIO; + } + + ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr; + hyp = tzdbg.hyp_diag_buf; + hyp->log_pos.wrap = hyp->log_pos.offset = 0; + return 0; +} + +static int tzdbg_get_tz_version(void) +{ + u64 version; + int ret = 0; + + ret = qcom_scm_get_tz_log_feat_id(&version); + + if (ret) { + pr_err("%s: scm_call to get tz version failed\n", + __func__); + return ret; + } + tzdbg.tz_version = version; + + ret = qcom_scm_get_tz_feat_id_version(QCOM_SCM_FEAT_DIAG_ID, &version); + if (ret) { + pr_err("%s: scm_call to get tz diag version failed, ret = %d\n", + __func__, ret); + return ret; + } + pr_warn("tz diag version is %x\n", version); + tzdbg.tz_diag_major_version = + ((version >> 
TZBSP_FVER_MAJOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK); + tzdbg.tz_diag_minor_version = + ((version >> TZBSP_FVER_MINOR_SHIFT) & TZBSP_FVER_MAJOR_MINOR_MASK); + if (tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) { + switch (tzdbg.tz_diag_minor_version) { + case TZBSP_DIAG_MINOR_VERSION_V2: + case TZBSP_DIAG_MINOR_VERSION_V21: + case TZBSP_DIAG_MINOR_VERSION_V22: + tzdbg.is_enlarged_buf = true; + break; + default: + tzdbg.is_enlarged_buf = false; + } + } else { + tzdbg.is_enlarged_buf = false; + } + return ret; +} + +static void tzdbg_query_encrypted_log(void) +{ + int ret = 0; + uint64_t enabled; + + ret = qcom_scm_query_encrypted_log_feature(&enabled); + if (ret) { + pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret); + tzdbg.is_encrypted_log_enabled = false; + } else { + pr_warn("encrypted qseelog enabled is %d\n", enabled); + tzdbg.is_encrypted_log_enabled = enabled; + } +} + +/* + * Driver functions + */ +static int tz_log_probe(struct platform_device *pdev) +{ + struct resource *resource; + void __iomem *virt_iobase; + phys_addr_t tzdiag_phy_iobase; + uint32_t *ptr = NULL; + int ret = 0; + + ret = tzdbg_get_tz_version(); + if (ret) + return ret; + + /* + * Get address that stores the physical location diagnostic data + */ + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!resource) { + dev_err(&pdev->dev, + "%s: ERROR Missing MEM resource\n", __func__); + return -ENXIO; + } + + /* + * Get the debug buffer size + */ + debug_rw_buf_size = resource_size(resource); + + /* + * Map address that stores the physical location diagnostic data + */ + virt_iobase = devm_ioremap(&pdev->dev, resource->start, + debug_rw_buf_size); + if (!virt_iobase) { + dev_err(&pdev->dev, + "%s: ERROR could not ioremap: start=%pr, len=%u\n", + __func__, &resource->start, + (unsigned int)(debug_rw_buf_size)); + return -ENXIO; + } + + if (pdev->dev.of_node) { + tzdbg.is_hyplog_enabled = of_property_read_bool( + (&pdev->dev)->of_node, "qcom,hyplog-enabled"); + if (tzdbg.is_hyplog_enabled) { + ret = __update_hypdbg_base(pdev, virt_iobase); + if (ret) { + dev_err(&pdev->dev, + "%s: fail to get hypdbg_base ret %d\n", + __func__, ret); + return -EINVAL; + } + } else { + dev_info(&pdev->dev, "Hyp log service not support\n"); + } + } else { + dev_dbg(&pdev->dev, "Device tree data is not found\n"); + } + + /* + * Retrieve the address of diagnostic data + */ + tzdiag_phy_iobase = readl_relaxed(virt_iobase); + + tzdbg_query_encrypted_log(); + /* + * Map the diagnostic information area if encryption is disabled + */ + if (!tzdbg.is_encrypted_log_enabled) { + tzdbg.virt_iobase = devm_ioremap(&pdev->dev, + tzdiag_phy_iobase, debug_rw_buf_size); + + if (!tzdbg.virt_iobase) { + dev_err(&pdev->dev, + "%s: could not ioremap: start=%pr, len=%u\n", + __func__, &tzdiag_phy_iobase, + debug_rw_buf_size); + return -ENXIO; + } + /* allocate diag_buf */ + ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL); + if (ptr == NULL) + return -ENOMEM; + tzdbg.diag_buf = (struct tzdbg_t *)ptr; + } else { + if ((tzdbg.tz_diag_major_version == TZBSP_DIAG_MAJOR_VERSION_V9) && + (tzdbg.tz_diag_minor_version >= TZBSP_DIAG_MINOR_VERSION_V22)) + tzdbg.is_full_encrypted_tz_logs_supported = true; + if (pdev->dev.of_node) { + tzdbg.is_full_encrypted_tz_logs_enabled = of_property_read_bool( + (&pdev->dev)->of_node, "qcom,full-encrypted-tz-logs-enabled"); + } + } + + /* register unencrypted qsee log buffer */ + ret = tzdbg_register_qsee_log_buf(pdev); + if (ret) + goto exit_free_diag_buf; + + /* allocate encrypted qsee and tz 
log buffer */ + ret = tzdbg_allocate_encrypted_log_buf(pdev); + if (ret) { + dev_err(&pdev->dev, + "Failed to allocate encrypted log buffer\n", + __func__); + goto exit_free_qsee_log_buf; + } + + /* allocate display_buf */ + if (UINT_MAX/4 < qseelog_buf_size) { + pr_err("display_buf_size integer overflow\n"); + goto exit_free_qsee_log_buf; + } + display_buf_size = qseelog_buf_size * 4; + tzdbg.disp_buf = dma_alloc_coherent(&pdev->dev, display_buf_size, + &disp_buf_paddr, GFP_KERNEL); + if (tzdbg.disp_buf == NULL) { + ret = -ENOMEM; + goto exit_free_encr_log_buf; + } + + if (tzdbg_fs_init(pdev)) + goto exit_free_disp_buf; + return 0; + +exit_free_disp_buf: + dma_free_coherent(&pdev->dev, display_buf_size, + (void *)tzdbg.disp_buf, disp_buf_paddr); +exit_free_encr_log_buf: + tzdbg_free_encrypted_log_buf(pdev); +exit_free_qsee_log_buf: + tzdbg_free_qsee_log_buf(pdev); +exit_free_diag_buf: + if (!tzdbg.is_encrypted_log_enabled) + kfree(tzdbg.diag_buf); + return -ENXIO; +} + +static int tz_log_remove(struct platform_device *pdev) +{ + tzdbg_fs_exit(pdev); + dma_free_coherent(&pdev->dev, display_buf_size, + (void *)tzdbg.disp_buf, disp_buf_paddr); + tzdbg_free_encrypted_log_buf(pdev); + tzdbg_free_qsee_log_buf(pdev); + if (!tzdbg.is_encrypted_log_enabled) + kfree(tzdbg.diag_buf); + return 0; +} + +static const struct of_device_id tzlog_match[] = { + {.compatible = "qcom,tz-log"}, + {} +}; + +static struct platform_driver tz_log_driver = { + .probe = tz_log_probe, + .remove = tz_log_remove, + .driver = { + .name = "tz_log", + .of_match_table = tzlog_match, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; + +module_platform_driver(tz_log_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TZ Log driver"); From 4fcb225c3b538f1848cdf37cdbdac006ebe6a78e Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Tue, 14 Dec 2021 11:20:05 -0800 Subject: [PATCH 003/202] securemsm-kernel: Fix qcrypto compilation Temporarily commented des_expand_key. 
Change-Id: Ifb82b2944a27f53a9ef925ecb6dd4b6e73ede419 --- Kbuild | 2 +- arch/arm64/boot/dts/Makefile | 5 -- arch/arm64/boot/dts/securemsm-kernel.dtsi | 72 ----------------------- crypto-qti/qcrypto.c | 4 +- 4 files changed, 4 insertions(+), 79 deletions(-) delete mode 100644 arch/arm64/boot/dts/Makefile delete mode 100644 arch/arm64/boot/dts/securemsm-kernel.dtsi diff --git a/Kbuild b/Kbuild index bf3e85214d..eeee29c549 100644 --- a/Kbuild +++ b/Kbuild @@ -13,5 +13,5 @@ obj-m += qcedev-mod_dlkm.o qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o obj-m += qcrypto-msm_dlkm.o -qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o crypto-qti/des.o +qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile deleted file mode 100644 index 42c3b62880..0000000000 --- a/arch/arm64/boot/dts/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -dtbo-y +=securemsm-kernel.dtbo - -always-y := $(dtb-y) $(dtbo-y) -subdir-y := $(dts-dirs) -clean-files := *.dtb *.dtbo diff --git a/arch/arm64/boot/dts/securemsm-kernel.dtsi b/arch/arm64/boot/dts/securemsm-kernel.dtsi deleted file mode 100644 index ef3696abbf..0000000000 --- a/arch/arm64/boot/dts/securemsm-kernel.dtsi +++ /dev/null @@ -1,72 +0,0 @@ -/dts-v1/; -/plugin/; - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -&reserved_memory { - - user_contig_mem: user_contig_region { - compatible = "shared-dma-pool"; - alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; - reusable; - alignment = <0x0 0x400000>; - size = <0x0 0x1000000>; - }; - qseecom_mem: qseecom_region { - compatible = "shared-dma-pool"; - alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; - reusable; - alignment = <0x0 0x400000>; - size = <0x0 0x1400000>; - }; - - qseecom_ta_mem: qseecom_ta_region { - compatible = "shared-dma-pool"; - alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>; - reusable; - alignment = <0x0 0x400000>; - size = <0x0 0x1000000>; - }; -}; -&firmware { - qcom_smcinvoke { - compatible = "qcom,smcinvoke"; - }; - - qcom_tzlog: tz-log@146AA720 { - - compatible = "qcom,tz-log"; - reg = <0x146AA720 0x3000>; - qcom,hyplog-enabled; - hyplog-address-offset = <0x410>; - hyplog-size-offset = <0x414>; - }; - - qcom,dma-heaps { - qcom,qseecom { - qcom,dma-heap-name = "qcom,qseecom"; - qcom,dma-heap-type = ; - memory-region = <&qseecom_mem>; - }; - - qcom,qseecom_ta { - qcom,dma-heap-name = "qcom,qseecom-ta"; - qcom,dma-heap-type = ; - memory-region = <&qseecom_ta_mem>; - }; - }; -}; - - - - diff --git a/crypto-qti/qcrypto.c b/crypto-qti/qcrypto.c index 32864a85b5..d13e179080 100644 --- a/crypto-qti/qcrypto.c +++ b/crypto-qti/qcrypto.c @@ -1494,12 +1494,14 @@ static int _qcrypto_setkey_des(struct crypto_skcipher *tfm, const u8 *key, return -EINVAL; } memset(&dctx, 0, sizeof(dctx)); + /*Need to be fixed. Compilation error was seen with the below API. 
+ Needs to be uncommented and enable if (des_expand_key(&dctx, key, keylen) == -ENOKEY) { if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) return -EINVAL; else return 0; - } + }*/ /* * TODO: delete of find equivalent in skcipher api From fb87f65caef8a91a35ce8c6b6ab34760a5cf0eb9 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Sun, 9 Jan 2022 09:30:14 -0800 Subject: [PATCH 004/202] Fix qcedev compilation Change-Id: I263496352ab0de591beb8488197ede449eee740c --- linux/fips_status.h | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 linux/fips_status.h diff --git a/linux/fips_status.h b/linux/fips_status.h new file mode 100644 index 0000000000..559a229d6b --- /dev/null +++ b/linux/fips_status.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + */ + +#ifndef _FIPS_STATUS__H +#define _FIPS_STATUS__H + +#include +#include + +/** + * fips_status: global FIPS140-2 status + * @FIPS140_STATUS_NA: + * Not a FIPS140-2 compliant Build. + * The flag status won't + * change throughout + * the lifetime + * @FIPS140_STATUS_PASS_CRYPTO: + * KAT self tests are passed. + * @FIPS140_STATUS_QCRYPTO_ALLOWED: + * Integrity test is passed. + * @FIPS140_STATUS_PASS: + * All tests are passed and build + * is in FIPS140-2 mode + * @FIPS140_STATUS_FAIL: + * One of the test is failed. + * This will block all requests + * to crypto modules + */ +enum fips_status { + FIPS140_STATUS_NA = 0, + FIPS140_STATUS_PASS_CRYPTO = 1, + FIPS140_STATUS_QCRYPTO_ALLOWED = 2, + FIPS140_STATUS_PASS = 3, + FIPS140_STATUS_FAIL = 0xFF +}; +#endif /* _FIPS_STATUS__H */ From 4af211aa1b6be24627e75ef921d8a40f4c433721 Mon Sep 17 00:00:00 2001 From: Smita Ghosh Date: Wed, 19 Jan 2022 11:49:59 -0800 Subject: [PATCH 005/202] tz_log: Update the tz_log module port fixes present in previous target. 
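The bulk of the port below is a new "rm_log" stat under /proc/tzdbg: __update_rmlog_base() ioremaps a region described by the "rmlog-address" and "rmlog-size" DT properties, and _disp_rm_log_stats() exposes its contents, where the region starts with a small header (write index plus payload size) followed by the log text. A simplified, self-contained sketch of that read path, with the driver's chunked, stateful copy collapsed into one bounded memcpy_fromio(); rmlog_read_once() and its arguments are illustrative names, only struct rmdbg_log_hdr_t matches the patch:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct rmdbg_log_hdr_t {
    uint32_t write_idx;
    uint32_t size;
};

/* Illustrative helper: copy at most dst_size bytes of RM log payload. */
static size_t rmlog_read_once(void __iomem *rmlog_base, uint32_t region_size,
                              char *dst, size_t dst_size)
{
    struct rmdbg_log_hdr_t hdr;
    size_t len;

    if (region_size <= sizeof(hdr))
        return 0;

    /* The header sits at the start of the ioremapped region. */
    memcpy_fromio(&hdr, rmlog_base, sizeof(hdr));

    /* Bound the copy by the payload size, the region size and the buffer. */
    len = min_t(size_t, hdr.size, region_size - sizeof(hdr));
    len = min_t(size_t, len, dst_size);

    memcpy_fromio(dst, rmlog_base + sizeof(hdr), len);
    return len;
}

In the driver itself the copy is stateful: rmdbg_log_pos_t tracks read_idx and the remaining size across reads, so repeated reads of the proc file return the remainder, and the wrap_around flag closes the file once the payload is drained.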
Change-Id: Ie0ce140ecb4142e93c3b5e69ffe1e39266ea1431 --- tz_log/tz_log.c | 142 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 141 insertions(+), 1 deletion(-) diff --git a/tz_log/tz_log.c b/tz_log/tz_log.c index 0e937ec9ae..9124bff572 100644 --- a/tz_log/tz_log.c +++ b/tz_log/tz_log.c @@ -337,6 +337,14 @@ struct hypdbg_log_pos_t { uint16_t offset; }; +struct rmdbg_log_hdr_t { + uint32_t write_idx; + uint32_t size; +}; +struct rmdbg_log_pos_t { + uint32_t read_idx; + uint32_t size; +}; struct hypdbg_boot_info_t { uint32_t warm_entry_cnt; uint32_t warm_exit_cnt; @@ -379,6 +387,7 @@ enum tzdbg_stats_type { TZDBG_QSEE_LOG, TZDBG_HYP_GENERAL, TZDBG_HYP_LOG, + TZDBG_RM_LOG, TZDBG_STATS_MAX }; @@ -392,12 +401,15 @@ struct tzdbg_stat { struct tzdbg { void __iomem *virt_iobase; void __iomem *hyp_virt_iobase; + void __iomem *rmlog_virt_iobase; struct tzdbg_t *diag_buf; struct hypdbg_t *hyp_diag_buf; + uint8_t *rm_diag_buf; char *disp_buf; int debug_tz[TZDBG_STATS_MAX]; struct tzdbg_stat stat[TZDBG_STATS_MAX]; uint32_t hyp_debug_rw_buf_size; + uint32_t rmlog_rw_buf_size; bool is_hyplog_enabled; uint32_t tz_version; bool is_encrypted_log_enabled; @@ -444,6 +456,7 @@ static struct tzdbg tzdbg = { .stat[TZDBG_QSEE_LOG].name = "qsee_log", .stat[TZDBG_HYP_GENERAL].name = "hyp_general", .stat[TZDBG_HYP_LOG].name = "hyp_log", + .stat[TZDBG_RM_LOG].name = "rm_log", }; static struct tzdbg_log_t *g_qsee_log; @@ -922,6 +935,22 @@ static int __disp_hyp_log_stats(uint8_t *log, tzdbg.stat[buf_idx].data = tzdbg.disp_buf; return len; } +static int __disp_rm_log_stats(uint8_t *log_ptr, uint32_t max_len) +{ + uint32_t i = 0; + /* + * Transfer data from rm dialog buff to display buffer in user space + */ + while ((i < max_len) && (i < display_buf_size)) { + tzdbg.disp_buf[i] = log_ptr[i]; + i++; + } + if (i != max_len) + pr_err("Dropping RM log message, max_len:%d display_buf_size:%d\n", + i, display_buf_size); + tzdbg.stat[TZDBG_RM_LOG].data = tzdbg.disp_buf; + return i; +} static int print_text(char *intro_message, unsigned char *text_addr, @@ -1045,6 +1074,55 @@ static int _disp_hyp_log_stats(size_t count) log_len, count, TZDBG_HYP_LOG); } +static int _disp_rm_log_stats(size_t count) +{ + static struct rmdbg_log_pos_t log_start = { 0 }; + struct rmdbg_log_hdr_t *p_log_hdr = NULL; + uint8_t *log_ptr = NULL; + uint32_t log_len = 0; + static bool wrap_around = { false }; + + /* Return 0 to close the display file,if there is nothing else to do */ + if ((log_start.size == 0x0) && wrap_around) { + wrap_around = false; + return 0; + } + /* Copy RM log data to tzdbg diag buffer for the first time */ + /* Initialize the tracking data structure */ + if (tzdbg.rmlog_rw_buf_size != 0) { + if (!wrap_around) { + memcpy_fromio((void *)tzdbg.rm_diag_buf, + tzdbg.rmlog_virt_iobase, + tzdbg.rmlog_rw_buf_size); + /* get RM header info first */ + p_log_hdr = (struct rmdbg_log_hdr_t *)tzdbg.rm_diag_buf; + /* Update RM log buffer index tracker and its size */ + log_start.read_idx = 0x0; + log_start.size = p_log_hdr->size; + } + /* Update RM log buffer starting ptr */ + log_ptr = + (uint8_t *) ((unsigned char *)tzdbg.rm_diag_buf + + sizeof(struct rmdbg_log_hdr_t)); + } else { + /* Return 0 to close the display file,if there is nothing else to do */ + pr_err("There is no RM log to read, size is %d!\n", + tzdbg.rmlog_rw_buf_size); + return 0; + } + log_len = log_start.size; + log_ptr += log_start.read_idx; + /* Check if we exceed the max length provided by user space */ + log_len = (count > log_len) ? 
log_len : count; + /* Update tracking data structure */ + log_start.size -= log_len; + log_start.read_idx += log_len; + + if (log_start.size) + wrap_around = true; + return __disp_rm_log_stats(log_ptr, log_len); +} + static int _disp_qsee_log_stats(size_t count) { static struct tzdbg_log_pos_t log_start = {0}; @@ -1148,6 +1226,10 @@ static ssize_t tzdbg_fs_read_unencrypted(int tz_id, char __user *buf, len = _disp_hyp_log_stats(count); *offp = 0; break; + case TZDBG_RM_LOG: + len = _disp_rm_log_stats(count); + *offp = 0; + break; default: break; } @@ -1213,7 +1295,8 @@ static ssize_t tzdbg_fs_read(struct file *file, char __user *buf, } if (!tzdbg.is_encrypted_log_enabled || - (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG)) + (tz_id == TZDBG_HYP_GENERAL || tz_id == TZDBG_HYP_LOG) + || tz_id == TZDBG_RM_LOG) return tzdbg_fs_read_unencrypted(tz_id, buf, count, offp); else return tzdbg_fs_read_encrypted(tz_id, buf, count, offp); @@ -1412,6 +1495,7 @@ err: static void tzdbg_fs_exit(struct platform_device *pdev) { struct proc_dir_entry *dent_dir; + dent_dir = platform_get_drvdata(pdev); if (dent_dir) remove_proc_entry(TZDBG_DIR_NAME, NULL); @@ -1460,6 +1544,55 @@ static int __update_hypdbg_base(struct platform_device *pdev, return 0; } +static int __update_rmlog_base(struct platform_device *pdev, + void __iomem *virt_iobase) +{ + uint32_t rmlog_address; + uint32_t rmlog_size; + uint32_t *ptr = NULL; + + /* if we don't get the node just ignore it */ + if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-address", + &rmlog_address)) { + dev_err(&pdev->dev, "RM log address is not defined\n"); + tzdbg.rmlog_rw_buf_size = 0; + return 0; + } + /* if we don't get the node just ignore it */ + if (of_property_read_u32((&pdev->dev)->of_node, "rmlog-size", + &rmlog_size)) { + dev_err(&pdev->dev, "RM log size is not defined\n"); + tzdbg.rmlog_rw_buf_size = 0; + return 0; + } + + tzdbg.rmlog_rw_buf_size = rmlog_size; + + /* Check if there is RM log to read */ + if (!tzdbg.rmlog_rw_buf_size) { + tzdbg.rmlog_virt_iobase = NULL; + tzdbg.rm_diag_buf = NULL; + dev_err(&pdev->dev, "RM log size is %d\n", + tzdbg.rmlog_rw_buf_size); + return 0; + } + + tzdbg.rmlog_virt_iobase = devm_ioremap(&pdev->dev, + rmlog_address, + rmlog_size); + if (!tzdbg.rmlog_virt_iobase) { + dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n", + rmlog_address, tzdbg.rmlog_rw_buf_size); + return -ENXIO; + } + + ptr = kzalloc(tzdbg.rmlog_rw_buf_size, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + tzdbg.rm_diag_buf = (uint8_t *)ptr; + return 0; +} static int tzdbg_get_tz_version(void) { u64 version; @@ -1570,6 +1703,13 @@ static int tz_log_probe(struct platform_device *pdev) __func__, ret); return -EINVAL; } + ret = __update_rmlog_base(pdev, virt_iobase); + if (ret) { + dev_err(&pdev->dev, + "%s: fail to get rmlog_base ret %d\n", + __func__, ret); + return -EINVAL; + } } else { dev_info(&pdev->dev, "Hyp log service not support\n"); } From 49142cbffece6b573de1008e9d037f5ad392b2a9 Mon Sep 17 00:00:00 2001 From: Sheik Anwar Shabic Y Date: Tue, 25 Jan 2022 10:18:41 +0530 Subject: [PATCH 006/202] securemsm-kernel : Enable hdcp_qseecom module compilation 1. enable hdcp_qseecom.ko module compilation. 2. removed duplicated files in crypto-qti and smcInvoke folder. 
Change-Id: I18c14000756484aa3d4723a58814ba8350d12927 --- Android.mk | 14 +- Kbuild | 19 +- Makefile | 7 +- config/ssg_smcinvoke.conf | 1 + crypto-qti/linux/fips_status.h | 38 - .../linux/platform_data/qcom_crypto_device.h | 18 - crypto-qti/linux/qcedev.h | 289 --- crypto-qti/linux/qcrypto.h | 60 - hdcp/hdcp_qseecom.c | 1862 +++++++++++++++++ linux/hdcp_qseecom.h | 153 ++ {smcinvoke => linux}/misc/qseecom_kernel.h | 0 {crypto-qti/linux => linux}/qcota.h | 0 securemsm_kernel_product_board.mk | 2 + securemsm_kernel_vendor_board.mk | 2 + 14 files changed, 2046 insertions(+), 419 deletions(-) delete mode 100644 crypto-qti/linux/fips_status.h delete mode 100644 crypto-qti/linux/platform_data/qcom_crypto_device.h delete mode 100644 crypto-qti/linux/qcedev.h delete mode 100644 crypto-qti/linux/qcrypto.h create mode 100644 hdcp/hdcp_qseecom.c create mode 100644 linux/hdcp_qseecom.h rename {smcinvoke => linux}/misc/qseecom_kernel.h (100%) rename {crypto-qti/linux => linux}/qcota.h (100%) diff --git a/Android.mk b/Android.mk index d95cf8a5d1..cb8f0f94ea 100644 --- a/Android.mk +++ b/Android.mk @@ -3,16 +3,12 @@ LOCAL_PATH := $(call my-dir) DLKM_DIR := $(TOP)/device/qcom/common/dlkm - - - SSG_SRC_FILES := \ $(wildcard $(LOCAL_PATH)/*) \ $(wildcard $(LOCAL_PATH)/*/*) \ $(wildcard $(LOCAL_PATH)/*/*/*) \ $(wildcard $(LOCAL_PATH)/*/*/*/*) - #$(error $(SSG_SRC_FILES)) include $(CLEAR_VARS) #LOCAL_SRC_FILES := $(SSG_SRC_FILES) @@ -63,3 +59,13 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################# +################################################# +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := hdcp_qseecom_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := hdcp_qseecom_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# diff --git a/Kbuild b/Kbuild index eeee29c549..f1b98ce19f 100644 --- a/Kbuild +++ b/Kbuild @@ -1,17 +1,24 @@ -include $(SSG_MODULE_ROOT)/config/ssg_smcinvoke.conf +include $(SSG_MODULE_ROOT)/config/ssg_smcinvoke.conf -obj-m += smcinvoke_dlkm.o +LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ + -I$(SSG_MODULE_ROOT)/linux/ + +KBUILD_CPPFLAGS += -DCONFIG_HDCP_QSEECOM + +obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o -obj-m += tz_log_dlkm.o +obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o tz_log_dlkm-objs := tz_log/tz_log.o -obj-m += qce50_dlkm.o +obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o qce50_dlkm-objs := crypto-qti/qce50.o -obj-m += qcedev-mod_dlkm.o +obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qcedev-mod_dlkm.o qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o -obj-m += qcrypto-msm_dlkm.o +obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o +obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o +hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o diff --git a/Makefile b/Makefile index 3cdc7da653..34b6c6fcd2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ M=$(PWD) SSG_MODULE_ROOT=$(KERNEL_SRC)/$(M) - -KBUILD_OPTIONS+= SSG_MODULE_ROOT=$(SSG_MODULE_ROOT) +INC=-I/$(M)/linux/* +KBUILD_OPTIONS+=SSG_MODULE_ROOT=$(SSG_MODULE_ROOT) all: modules @@ -9,5 +9,4 @@ clean: $(MAKE) -C $(KERNEL_SRC) M=$(M) clean %: - $(MAKE) -C $(KERNEL_SRC) M=$(M) $@ 
$(KBUILD_OPTIONS) - + $(MAKE) -C $(KERNEL_SRC) M=$(M) $(INC) $@ $(KBUILD_OPTIONS) \ No newline at end of file diff --git a/config/ssg_smcinvoke.conf b/config/ssg_smcinvoke.conf index ae62b67f20..f64b0435db 100644 --- a/config/ssg_smcinvoke.conf +++ b/config/ssg_smcinvoke.conf @@ -5,3 +5,4 @@ export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m export CONFIG_CRYPTO_DEV_QCRYPTO=m export CONFIG_SCSI_UFS_CRYPTO=m export CONFIG_SCSI_UFS_CRYPTO_QTI=m +export CONFIG_HDCP_QSEECOM=m \ No newline at end of file diff --git a/crypto-qti/linux/fips_status.h b/crypto-qti/linux/fips_status.h deleted file mode 100644 index 559a229d6b..0000000000 --- a/crypto-qti/linux/fips_status.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. - */ - -#ifndef _FIPS_STATUS__H -#define _FIPS_STATUS__H - -#include -#include - -/** - * fips_status: global FIPS140-2 status - * @FIPS140_STATUS_NA: - * Not a FIPS140-2 compliant Build. - * The flag status won't - * change throughout - * the lifetime - * @FIPS140_STATUS_PASS_CRYPTO: - * KAT self tests are passed. - * @FIPS140_STATUS_QCRYPTO_ALLOWED: - * Integrity test is passed. - * @FIPS140_STATUS_PASS: - * All tests are passed and build - * is in FIPS140-2 mode - * @FIPS140_STATUS_FAIL: - * One of the test is failed. - * This will block all requests - * to crypto modules - */ -enum fips_status { - FIPS140_STATUS_NA = 0, - FIPS140_STATUS_PASS_CRYPTO = 1, - FIPS140_STATUS_QCRYPTO_ALLOWED = 2, - FIPS140_STATUS_PASS = 3, - FIPS140_STATUS_FAIL = 0xFF -}; -#endif /* _FIPS_STATUS__H */ diff --git a/crypto-qti/linux/platform_data/qcom_crypto_device.h b/crypto-qti/linux/platform_data/qcom_crypto_device.h deleted file mode 100644 index 819df7c5e5..0000000000 --- a/crypto-qti/linux/platform_data/qcom_crypto_device.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. - */ - -#ifndef __QCOM_CRYPTO_DEVICE__H -#define __QCOM_CRYPTO_DEVICE__H - -#include - -struct msm_ce_hw_support { - uint32_t ce_shared; - uint32_t shared_ce_resource; - uint32_t hw_key_support; - uint32_t sha_hmac; -}; - -#endif /* __QCOM_CRYPTO_DEVICE__H */ diff --git a/crypto-qti/linux/qcedev.h b/crypto-qti/linux/qcedev.h deleted file mode 100644 index 6968e92c4b..0000000000 --- a/crypto-qti/linux/qcedev.h +++ /dev/null @@ -1,289 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ -/* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. - */ - -#ifndef _QCEDEV__H -#define _QCEDEV__H - -#include -#include -#include "fips_status.h" - -#define QCEDEV_MAX_SHA_BLOCK_SIZE 64 -#define QCEDEV_MAX_BEARER 31 -#define QCEDEV_MAX_KEY_SIZE 64 -#define QCEDEV_MAX_IV_SIZE 32 - -#define QCEDEV_MAX_BUFFERS 16 -#define QCEDEV_MAX_SHA_DIGEST 32 - -#define QCEDEV_USE_PMEM 1 -#define QCEDEV_NO_PMEM 0 - -#define QCEDEV_AES_KEY_128 16 -#define QCEDEV_AES_KEY_192 24 -#define QCEDEV_AES_KEY_256 32 -/** - *qcedev_oper_enum: Operation types - * @QCEDEV_OPER_ENC: Encrypt - * @QCEDEV_OPER_DEC: Decrypt - * @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by - * user. Key already set by an external processor. - * @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by - * user. Key already set by an external processor. 
- */ -enum qcedev_oper_enum { - QCEDEV_OPER_DEC = 0, - QCEDEV_OPER_ENC = 1, - QCEDEV_OPER_DEC_NO_KEY = 2, - QCEDEV_OPER_ENC_NO_KEY = 3, - QCEDEV_OPER_LAST -}; - -/** - *qcedev_oper_enum: Cipher algorithm types - * @QCEDEV_ALG_DES: DES - * @QCEDEV_ALG_3DES: 3DES - * @QCEDEV_ALG_AES: AES - */ -enum qcedev_cipher_alg_enum { - QCEDEV_ALG_DES = 0, - QCEDEV_ALG_3DES = 1, - QCEDEV_ALG_AES = 2, - QCEDEV_ALG_LAST -}; - -/** - *qcedev_cipher_mode_enum : AES mode - * @QCEDEV_AES_MODE_CBC: CBC - * @QCEDEV_AES_MODE_ECB: ECB - * @QCEDEV_AES_MODE_CTR: CTR - * @QCEDEV_AES_MODE_XTS: XTS - * @QCEDEV_AES_MODE_CCM: CCM - * @QCEDEV_DES_MODE_CBC: CBC - * @QCEDEV_DES_MODE_ECB: ECB - */ -enum qcedev_cipher_mode_enum { - QCEDEV_AES_MODE_CBC = 0, - QCEDEV_AES_MODE_ECB = 1, - QCEDEV_AES_MODE_CTR = 2, - QCEDEV_AES_MODE_XTS = 3, - QCEDEV_AES_MODE_CCM = 4, - QCEDEV_DES_MODE_CBC = 5, - QCEDEV_DES_MODE_ECB = 6, - QCEDEV_AES_DES_MODE_LAST -}; - -/** - *enum qcedev_sha_alg_enum : Secure Hashing Algorithm - * @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits) - * @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit) - * @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits) - * @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit) - * @QCEDEV_ALG_AES_CMAC: Configurable MAC size - */ -enum qcedev_sha_alg_enum { - QCEDEV_ALG_SHA1 = 0, - QCEDEV_ALG_SHA256 = 1, - QCEDEV_ALG_SHA1_HMAC = 2, - QCEDEV_ALG_SHA256_HMAC = 3, - QCEDEV_ALG_AES_CMAC = 4, - QCEDEV_ALG_SHA_ALG_LAST -}; - -/** - * struct buf_info - Buffer information - * @offset: Offset from the base address of the buffer - * (Used when buffer is allocated using PMEM) - * @vaddr: Virtual buffer address pointer - * @len: Size of the buffer - */ -struct buf_info { - union { - __u32 offset; - __u8 *vaddr; - }; - __u32 len; -}; - -/** - * struct qcedev_vbuf_info - Source and destination Buffer information - * @src: Array of buf_info for input/source - * @dst: Array of buf_info for output/destination - */ -struct qcedev_vbuf_info { - struct buf_info src[QCEDEV_MAX_BUFFERS]; - struct buf_info dst[QCEDEV_MAX_BUFFERS]; -}; - -/** - * struct qcedev_pmem_info - Stores PMEM buffer information - * @fd_src: Handle to /dev/adsp_pmem used to allocate - * memory for input/src buffer - * @src: Array of buf_info for input/source - * @fd_dst: Handle to /dev/adsp_pmem used to allocate - * memory for output/dst buffer - * @dst: Array of buf_info for output/destination - * @pmem_src_offset: The offset from input/src buffer - * (allocated by PMEM) - */ -struct qcedev_pmem_info { - int fd_src; - struct buf_info src[QCEDEV_MAX_BUFFERS]; - int fd_dst; - struct buf_info dst[QCEDEV_MAX_BUFFERS]; -}; - -/** - * struct qcedev_cipher_op_req - Holds the ciphering request information - * @use_pmem (IN): Flag to indicate if buffer source is PMEM - * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM - * @pmem (IN): Stores PMEM buffer information. - * Refer struct qcedev_pmem_info - * @vbuf (IN/OUT): Stores Source and destination Buffer information - * Refer to struct qcedev_vbuf_info - * @data_len (IN): Total Length of input/src and output/dst in bytes - * @in_place_op (IN): Indicates whether the operation is inplace where - * source == destination - * When using PMEM allocated memory, must set this to 1 - * @enckey (IN): 128 bits of confidentiality key - * enckey[0] bit 127-120, enckey[1] bit 119-112,.. 
- * enckey[15] bit 7-0 - * @encklen (IN): Length of the encryption key(set to 128 bits/16 - * bytes in the driver) - * @iv (IN/OUT): Initialisation vector data - * This is updated by the driver, incremented by - * number of blocks encrypted/decrypted. - * @ivlen (IN): Length of the IV - * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set - * for AES-128 CTR mode only) - * @alg (IN): Type of ciphering algorithm: AES/DES/3DES - * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR - * Apllicabel when using AES algorithm only - * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or - * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY - * - *If use_pmem is set to 0, the driver assumes that memory was not allocated - * via PMEM, and kernel will need to allocate memory and copy data from user - * space buffer (data_src/dta_dst) and process accordingly and copy data back - * to the user space buffer - * - * If use_pmem is set to 1, the driver assumes that memory was allocated via - * PMEM. - * The kernel driver will use the fd_src to determine the kernel virtual address - * base that maps to the user space virtual address base for the buffer - * allocated in user space. - * The final input/src and output/dst buffer pointer will be determined - * by adding the offsets to the kernel virtual addr. - * - * If use of hardware key is supported in the target, user can configure the - * key parameters (encklen, enckey) to use the hardware key. - * In order to use the hardware key, set encklen to 0 and set the enckey - * data array to 0. - */ -struct qcedev_cipher_op_req { - __u8 use_pmem; - union { - struct qcedev_pmem_info pmem; - struct qcedev_vbuf_info vbuf; - }; - __u32 entries; - __u32 data_len; - __u8 in_place_op; - __u8 enckey[QCEDEV_MAX_KEY_SIZE]; - __u32 encklen; - __u8 iv[QCEDEV_MAX_IV_SIZE]; - __u32 ivlen; - __u32 byteoffset; - enum qcedev_cipher_alg_enum alg; - enum qcedev_cipher_mode_enum mode; - enum qcedev_oper_enum op; -}; - -/** - * struct qcedev_sha_op_req - Holds the hashing request information - * @data (IN): Array of pointers to the data to be hashed - * @entries (IN): Number of buf_info entries in the data array - * @data_len (IN): Length of data to be hashed - * @digest (IN/OUT): Returns the hashed data information - * @diglen (OUT): Size of the hashed/digest data - * @authkey (IN): Pointer to authentication key for HMAC - * @authklen (IN): Size of the authentication key - * @alg (IN): Secure Hash algorithm - */ -struct qcedev_sha_op_req { - struct buf_info data[QCEDEV_MAX_BUFFERS]; - __u32 entries; - __u32 data_len; - __u8 digest[QCEDEV_MAX_SHA_DIGEST]; - __u32 diglen; - __u8 *authkey; - __u32 authklen; - enum qcedev_sha_alg_enum alg; -}; - -/** - * struct qfips_verify_t - Holds data for FIPS Integrity test - * @kernel_size (IN): Size of kernel Image - * @kernel (IN): pointer to buffer containing the kernel Image - */ -struct qfips_verify_t { - unsigned int kernel_size; - void *kernel; -}; - -/** - * struct qcedev_map_buf_req - Holds the mapping request information - * fd (IN): Array of fds. - * num_fds (IN): Number of fds in fd[]. - * fd_size (IN): Array of sizes corresponding to each fd in fd[]. - * fd_offset (IN): Array of offset corresponding to each fd in fd[]. - * vaddr (OUT): Array of mapped virtual address corresponding to - * each fd in fd[]. 
- */ -struct qcedev_map_buf_req { - __s32 fd[QCEDEV_MAX_BUFFERS]; - __u32 num_fds; - __u32 fd_size[QCEDEV_MAX_BUFFERS]; - __u32 fd_offset[QCEDEV_MAX_BUFFERS]; - __u64 buf_vaddr[QCEDEV_MAX_BUFFERS]; -}; - -/** - * struct qcedev_unmap_buf_req - Holds the hashing request information - * fd (IN): Array of fds to unmap - * num_fds (IN): Number of fds in fd[]. - */ -struct qcedev_unmap_buf_req { - __s32 fd[QCEDEV_MAX_BUFFERS]; - __u32 num_fds; -}; - -struct file; - -#define QCEDEV_IOC_MAGIC 0x87 - -#define QCEDEV_IOCTL_ENC_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req) -#define QCEDEV_IOCTL_DEC_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req) -#define QCEDEV_IOCTL_SHA_INIT_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req) -#define QCEDEV_IOCTL_SHA_UPDATE_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req) -#define QCEDEV_IOCTL_SHA_FINAL_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req) -#define QCEDEV_IOCTL_GET_SHA_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req) -#define QCEDEV_IOCTL_LOCK_CE \ - _IO(QCEDEV_IOC_MAGIC, 7) -#define QCEDEV_IOCTL_UNLOCK_CE \ - _IO(QCEDEV_IOC_MAGIC, 8) -#define QCEDEV_IOCTL_GET_CMAC_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req) -#define QCEDEV_IOCTL_MAP_BUF_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req) -#define QCEDEV_IOCTL_UNMAP_BUF_REQ \ - _IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req) -#endif /* _QCEDEV__H */ diff --git a/crypto-qti/linux/qcrypto.h b/crypto-qti/linux/qcrypto.h deleted file mode 100644 index 4c034a9c1e..0000000000 --- a/crypto-qti/linux/qcrypto.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. 
- */ - -#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ -#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ - -#include -#include -#include -#include - -#define QCRYPTO_CTX_KEY_MASK 0x000000ff -#define QCRYPTO_CTX_USE_HW_KEY 0x00000001 -#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002 - -#define QCRYPTO_CTX_XTS_MASK 0x0000ff00 -#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100 -#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200 - - -int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev); -int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev); -int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev); - -int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags); -int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags); -int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags); - -int qcrypto_cipher_clear_flag(struct skcipher_request *req, - unsigned int flags); -int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags); -int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags); - -struct crypto_engine_entry { - u32 hw_instance; - u32 ce_device; - int shared; -}; - -int qcrypto_get_num_engines(void); -void qcrypto_get_engine_list(size_t num_engines, - struct crypto_engine_entry *arr); -int qcrypto_cipher_set_device_hw(struct skcipher_request *req, - unsigned int fde_pfe, - unsigned int hw_inst); - - -struct qcrypto_func_set { - int (*cipher_set)(struct skcipher_request *req, - unsigned int fde_pfe, - unsigned int hw_inst); - int (*cipher_flag)(struct skcipher_request *req, unsigned int flags); - int (*get_num_engines)(void); - void (*get_engine_list)(size_t num_engines, - struct crypto_engine_entry *arr); -}; - -#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H */ diff --git a/hdcp/hdcp_qseecom.c b/hdcp/hdcp_qseecom.c new file mode 100644 index 0000000000..f97ff22fcc --- /dev/null +++ b/hdcp/hdcp_qseecom.c @@ -0,0 +1,1862 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2022, The Linux Foundation. All rights reserved. + */ + +#define pr_fmt(fmt) "[hdcp-qseecom] %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "linux/hdcp_qseecom.h" +#include "misc/qseecom_kernel.h" + +#define HDCP2P2_APP_NAME "hdcp2p2" +#define HDCP1_APP_NAME "hdcp1" +#define HDCP1OPS_APP_NAME "ops" +#define HDCPSRM_APP_NAME "hdcpsrm" +#define QSEECOM_SBUFF_SIZE 0x1000 + +#define MAX_REC_ID_LIST_SIZE 160 +#define MAX_TX_MESSAGE_SIZE 129 +#define MAX_RX_MESSAGE_SIZE 534 +#define MAX_TOPOLOGY_ELEMS 32 +#define HDCP1_NOTIFY_TOPOLOGY 1 +#define HDCP1_AKSV_SIZE 8 + +#define HDCP1_SET_KEY 202 +#define HDCP1_KEY_VERIFY 204 +#define HDCP1_SET_ENC 205 + +#define BITS_8_IN_BYTES 1 +#define BITS_16_IN_BYTES 2 +#define BITS_24_IN_BYTES 3 +#define BITS_32_IN_BYTES 4 +#define BITS_40_IN_BYTES 5 +#define BITS_64_IN_BYTES 8 +#define BITS_128_IN_BYTES 16 +#define BITS_160_IN_BYTES 20 +#define BITS_256_IN_BYTES 32 +#define BITS_1024_IN_BYTES 128 +#define BITS_3072_IN_BYTES 384 +#define TXCAPS_SIZE 3 +#define RXCAPS_SIZE 3 +#define RXINFO_SIZE 2 +#define SEQ_NUM_V_SIZE 3 + +#define RCVR_ID_SIZE BITS_40_IN_BYTES +#define MAX_RCVR_IDS_ALLOWED_IN_LIST 31 +#define MAX_RCVR_ID_LIST_SIZE \ + (RCVR_ID_SIZE*MAX_RCVR_IDS_ALLOWED_IN_LIST) +/* + * minimum wait as per standard is 200 ms. keep it 300 ms + * to be on safe side. 
+ */ +#define SLEEP_SET_HW_KEY_MS 300 + +/* Wait 200ms after authentication */ +#define SLEEP_FORCE_ENCRYPTION_MS 200 + +/* hdcp command status */ +#define HDCP_SUCCESS 0 + +/* DP device type */ +#define DEVICE_TYPE_DP 0x8002 + +const char *HdcpErrors[] = { + "HDCP_SUCCESS", + "HDCP_FAIL", + "HDCP_BAD_PARAM", + "HDCP_DEVICE_TYPE_UNSUPPORTED", + "HDCP_INVALID_COMMAND", + "HDCP_INVALID_COMMAND_HANDLE", + "HDCP_ERROR_SIZE_IN", + "HDCP_ERROR_SIZE_OUT", + "HDCP_DATA_SIZE_INSUFFICIENT", + "HDCP_UNSUPPORTED_RX_VERSION", + "HDCP_WRONG_RX_CAPAB_MASK", + "HDCP_WRONG_RX_RSVD", + "HDCP_WRONG_RX_HDCP_CAPABLE", + "HDCP_RSA_SIGNATURE_VERIFY_FAILED", + "HDCP_VERIFY_H_PRIME_FAILED", + "HDCP_LC_FAILED", + "HDCP_MESSAGE_TIMEOUT", + "HDCP_COUNTER_ROLL_OVER", + "HDCP_WRONG_RXINFO_RSVD", + "HDCP_RXINFO_MAX_DEVS", + "HDCP_RXINFO_MAX_CASCADE", + "HDCP_WRONG_INITIAL_SEQ_NUM_V", + "HDCP_SEQ_NUM_V_ROLL_OVER", + "HDCP_WRONG_SEQ_NUM_V", + "HDCP_VERIFY_V_FAILED", + "HDCP_RPT_METHOD_INVOKED", + "HDCP_RPT_STRM_LEN_WRONG", + "HDCP_VERIFY_STRM_M_FAILED", + "HDCP_TRANSMITTER_NOT_FOUND", + "HDCP_SESSION_NOT_FOUND", + "HDCP_MAX_SESSION_EXCEEDED", + "HDCP_MAX_CONNECTION_EXCEEDED", + "HDCP_MAX_STREAMS_EXCEEDED", + "HDCP_MAX_DEVICES", + "HDCP_ALLOC_FAILED", + "HDCP_CONNECTION_NOT_FOUND", + "HDCP_HASH_FAILED", + "HDCP_BN_FAILED", + "HDCP_ENCRYPT_KM_FAILED", + "HDCP_DECRYPT_KM_FAILED", + "HDCP_HMAC_FAILED", + "HDCP_GET_RANDOM_FAILED", + "HDCP_INVALID_KEY_HEADER", + "HDCP_INVALID_KEY_LC_HASH", + "HDCP_INVALID_KEY_HASH", + "HDCP_KEY_WRITE_FAILED", + "HDCP_KEY_READ_FAILED", + "HDCP_KEY_DECRYPT_FAILED", + "HDCP_TEST_KEY_ON_SECURE_DEVICE", + "HDCP_KEY_VERSION_UNSUPPORTED", + "HDCP_RXID_NOT_FOUND", + "HDCP_STORAGE_INIT_FAILED", + "HDCP_STORAGE_FILE_OPEN_FAILED", + "HDCP_STORAGE_FILE_READ_FAILED", + "HDCP_STORAGE_FILE_WRITE_FAILED", + "HDCP_STORAGE_ID_UNSUPPORTED", + "HDCP_MUTUAL_EXCLUSIVE_DEVICE_PRESENT", + "HDCP_INVALID_STATE", + "HDCP_CONFIG_READ_FAILED", + "HDCP_OPEN_TZ_SERVICE_FAILED", + "HDCP_HW_CLOCK_OFF", + "HDCP_SET_HW_KEY_FAILED", + "HDCP_CLEAR_HW_KEY_FAILED", + "HDCP_GET_CONTENT_LEVEL_FAILED", + "HDCP_STREAMID_INUSE", + "HDCP_STREAM_NOT_FOUND", + "HDCP_FORCE_ENCRYPTION_FAILED", + "HDCP_STREAMNUMBER_INUSE" +}; + +/* flags set by tz in response message */ +#define HDCP_TXMTR_SUBSTATE_INIT 0 +#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST 1 +#define HDCP_TXMTR_SUBSTATE_PROCESSED_RECIEVERID_LIST 2 +#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_STREAM_READY_MESSAGE 3 +#define HDCP_TXMTR_SUBSTATE_REPEATER_AUTH_COMPLETE 4 + +#define HDCP_DEVICE_ID 0x0008000 +#define HDCP_CREATE_DEVICE_ID(x) (HDCP_DEVICE_ID | (x)) + +#define HDCP_TXMTR_HDMI HDCP_CREATE_DEVICE_ID(1) + +#define HDCP_TXMTR_SERVICE_ID 0x0001000 +#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | x) + +#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF) +#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF) +#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v) & 0xFF) + +#define HDCP_CLIENT_MAJOR_VERSION 2 +#define HDCP_CLIENT_MINOR_VERSION 1 +#define HDCP_CLIENT_PATCH_VERSION 0 +#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \ + ((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF)) + +#define hdcp2_app_init_var(x) \ + struct hdcp_##x##_req *req_buf = NULL; \ + struct hdcp_##x##_rsp *rsp_buf = NULL; \ + if (!handle->qseecom_handle) { \ + pr_err("invalid qseecom_handle while processing %s\n", #x); \ + rc = -EINVAL; \ + goto error; \ + } \ + req_buf = (struct hdcp_##x##_req *) handle->qseecom_handle->sbuf; \ + rsp_buf = (struct hdcp_##x##_rsp 
*) (handle->qseecom_handle->sbuf + \ + QSEECOM_ALIGN(sizeof(struct hdcp_##x##_req))); \ + req_buf->commandid = hdcp_cmd_##x + + +#define hdcp2_app_process_cmd(x) \ +({ \ + int rc = qseecom_send_command(handle->qseecom_handle, \ + req_buf, QSEECOM_ALIGN(sizeof(struct hdcp_##x##_req)), \ + rsp_buf, QSEECOM_ALIGN(sizeof(struct hdcp_##x##_rsp))); \ + if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS)) { \ + pr_err("qseecom cmd %s failed with err = %d, status = %d:%s\n" \ + , #x, rc, rsp_buf->status, \ + hdcp_cmd_status_to_str(rsp_buf->status)); \ + rc = -EINVAL; \ + } \ + rc; \ +}) + +enum { + hdcp_cmd_tx_init = SERVICE_CREATE_CMD(1), + hdcp_cmd_tx_init_v1 = SERVICE_CREATE_CMD(1), + hdcp_cmd_tx_deinit = SERVICE_CREATE_CMD(2), + hdcp_cmd_rcvd_msg = SERVICE_CREATE_CMD(3), + hdcp_cmd_send_timeout = SERVICE_CREATE_CMD(4), + hdcp_cmd_set_hw_key = SERVICE_CREATE_CMD(5), + hdcp_cmd_query_stream_type = SERVICE_CREATE_CMD(6), + hdcp_cmd_init_v1 = SERVICE_CREATE_CMD(11), + hdcp_cmd_init = SERVICE_CREATE_CMD(11), + hdcp_cmd_deinit = SERVICE_CREATE_CMD(12), + hdcp_cmd_version = SERVICE_CREATE_CMD(14), + hdcp_cmd_verify_key = SERVICE_CREATE_CMD(15), + hdcp_cmd_session_init = SERVICE_CREATE_CMD(16), + hdcp_cmd_session_deinit = SERVICE_CREATE_CMD(17), + hdcp_cmd_start_auth = SERVICE_CREATE_CMD(18), + hdcp_cmd_session_open_stream = SERVICE_CREATE_CMD(20), + hdcp_cmd_session_close_stream = SERVICE_CREATE_CMD(21), + hdcp_cmd_force_encryption = SERVICE_CREATE_CMD(22), +}; + +enum hdcp_state { + HDCP_STATE_INIT = 0x00, + HDCP_STATE_APP_LOADED = 0x01, + HDCP_STATE_SESSION_INIT = 0x02, + HDCP_STATE_TXMTR_INIT = 0x04, + HDCP_STATE_AUTHENTICATED = 0x08, + HDCP_STATE_ERROR = 0x10 +}; + +enum hdcp_element { + HDCP_TYPE_UNKNOWN, + HDCP_TYPE_RECEIVER, + HDCP_TYPE_REPEATER, +}; + +enum hdcp_version { + HDCP_VERSION_UNKNOWN, + HDCP_VERSION_2_2, + HDCP_VERSION_1_4 +}; + +struct receiver_info { + unsigned char rcvrInfo[RCVR_ID_SIZE]; + enum hdcp_element elem_type; + enum hdcp_version hdcp_version; +}; + +struct topology_info { + unsigned int nNumRcvrs; + struct receiver_info rcvinfo[MAX_TOPOLOGY_ELEMS]; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_set_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_set_rsp { + uint32_t commandid; + uint32_t ret; + uint8_t ksv[HDCP1_AKSV_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_version_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_version_rsp { + uint32_t status; + uint32_t commandId; + uint32_t appversion; +}; + +struct __attribute__ ((__packed__)) hdcp_verify_key_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_verify_key_rsp { + uint32_t status; + uint32_t commandId; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_verify_req { + uint32_t commandid; + uint32_t key_type; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_verify_rsp { + uint32_t commandId; + uint32_t ret; +}; + +struct __attribute__ ((__packed__)) hdcp_init_v1_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_init_v1_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_init_req { + uint32_t commandid; + uint32_t clientversion; +}; + +struct __attribute__ ((__packed__)) hdcp_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t appversion; +}; + +struct __attribute__ ((__packed__)) hdcp_deinit_req { + uint32_t commandid; +}; + +struct 
__attribute__ ((__packed__)) hdcp_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_session_init_req { + uint32_t commandid; + uint32_t deviceid; +}; + +struct __attribute__ ((__packed__)) hdcp_session_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_session_deinit_req { + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_session_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_v1_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_v1_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_req { + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_deinit_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_rcvd_msg_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t msglen; + uint8_t msg[MAX_RX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_rcvd_msg_rsp { + uint32_t status; + uint32_t commandid; + uint32_t state; + uint32_t timeout; + uint32_t flag; + uint32_t msglen; + uint8_t msg[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_set_hw_key_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_set_hw_key_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_send_timeout_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_send_timeout_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_query_stream_type_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_query_stream_type_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t msg[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_set_stream_type_req { + uint32_t commandid; + uint32_t ctxhandle; + uint8_t streamtype; +}; + +struct __attribute__ ((__packed__)) hdcp_set_stream_type_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_update_srm_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t srmoffset; + uint32_t srmlength; +}; + +struct __attribute__ ((__packed__)) hdcp_update_srm_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_get_topology_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_get_topology_rsp { + uint32_t status; + uint32_t commandid; + struct topology_info topologyinfo; +}; + +struct __attribute__ ((__packed__)) rxvr_info_struct { + uint8_t rcvrCert[522]; + uint8_t rrx[BITS_64_IN_BYTES]; + uint8_t rxcaps[RXCAPS_SIZE]; + bool repeater; +}; + +struct __attribute__ ((__packed__)) repeater_info_struct 
{ + uint8_t RxInfo[RXINFO_SIZE]; + uint8_t seq_num_V[SEQ_NUM_V_SIZE]; + bool seq_num_V_Rollover_flag; + uint8_t ReceiverIDList[MAX_RCVR_ID_LIST_SIZE]; + uint32_t ReceiverIDListLen; +}; + +struct __attribute__ ((__packed__)) hdcp1_set_enc_req { + uint32_t commandid; + uint32_t enable; +}; + +struct __attribute__ ((__packed__)) hdcp1_set_enc_rsp { + uint32_t commandid; + uint32_t ret; +}; + +struct __attribute__ ((__packed__)) hdcp1_ops_notify_req { + uint32_t commandid; + uint32_t device_type; + uint8_t recv_id_list[MAX_REC_ID_LIST_SIZE]; + int32_t recv_id_len; + struct hdcp1_topology topology; + bool is_authenticated; +}; + +struct __attribute__ ((__packed__)) hdcp1_ops_notify_rsp { + uint32_t commandid; + uint32_t ret; +}; + +struct __attribute__ ((__packed__)) hdcp_start_auth_req { + uint32_t commandid; + uint32_t ctxHandle; +}; + +struct __attribute__ ((__packed__)) hdcp_start_auth_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__((__packed__)) hdcp_session_open_stream_req { + uint32_t commandid; + uint32_t sessionid; + uint32_t vcpayloadid; + uint32_t stream_number; + uint32_t streamMediaType; +}; + +struct __attribute__((__packed__)) hdcp_session_open_stream_rsp { + uint32_t status; + uint32_t commandid; + uint32_t streamid; +}; + +struct __attribute__((__packed__)) hdcp_session_close_stream_req { + uint32_t commandid; + uint32_t sessionid; + uint32_t streamid; +}; + +struct __attribute__((__packed__)) hdcp_session_close_stream_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__((__packed__)) hdcp_force_encryption_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t enable; +}; + +struct __attribute__ ((__packed__)) hdcp_force_encryption_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct hdcp2_handle { + struct hdcp2_app_data app_data; + uint32_t tz_ctxhandle; + bool feature_supported; + enum hdcp_state hdcp_state; + struct qseecom_handle *qseecom_handle; + struct qseecom_handle *hdcpsrm_qseecom_handle; + uint32_t session_id; + bool legacy_app; + uint32_t device_type; + char *app_name; + + int (*app_init)(struct hdcp2_handle *handle); + int (*tx_init)(struct hdcp2_handle *handle); +}; + +/* + * struct hdcp1_handle - handle for HDCP 1.x client + * @qseecom_handle - for sending commands to qseecom + * @hdcpops_handle - for sending commands to ops TA + * @feature_supported - set to true if the platform supports HDCP 1.x + * @device_type - the interface type (HDMI or DisplayPort) + */ +struct hdcp1_handle { + struct qseecom_handle *qseecom_handle; + struct qseecom_handle *hdcpops_handle; + bool feature_supported; + uint32_t device_type; + enum hdcp_state hdcp_state; + char *app_name; +}; + +#define HDCP_CMD_STATUS_TO_STR(x) #x +static const char *hdcp_cmd_status_to_str(uint32_t err) +{ + int len = ARRAY_SIZE(HdcpErrors); + + if (err >= 0 && err < len) + return HdcpErrors[err]; + else + return ""; +} + +static int hdcp_get_version(struct hdcp2_handle *handle) +{ + int rc = 0; + uint32_t app_major_version = 0; + + hdcp2_app_init_var(version); + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_err("library already loaded\n"); + goto error; + } + + rc = hdcp2_app_process_cmd(version); + if (rc) + goto error; + + app_major_version = HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion); + + pr_debug("hdp2p2 app major version %d, app version %d\n", + app_major_version, rsp_buf->appversion); + + if (app_major_version == 1) + 
handle->legacy_app = true; +error: + return rc; +} + +static int hdcp2_app_init_legacy(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(init_v1); + + if (!handle->legacy_app) { + pr_err("wrong init function\n"); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_err("library already loaded\n"); + goto error; + } + + rc = hdcp2_app_process_cmd(init_v1); + if (rc) + goto error; + + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_init(struct hdcp2_handle *handle) +{ + int rc = 0; + uint32_t app_minor_version = 0; + + hdcp2_app_init_var(init); + + if (handle->legacy_app) { + pr_err("wrong init function\n"); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_err("library already loaded\n"); + goto error; + } + + req_buf->clientversion = + HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION, + HDCP_CLIENT_MINOR_VERSION, + HDCP_CLIENT_PATCH_VERSION); + + rc = hdcp2_app_process_cmd(init); + if (rc) + goto error; + + app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion); + if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) { + pr_err + ("client-app minor version mismatch app(%d), client(%d)\n", + app_minor_version, HDCP_CLIENT_MINOR_VERSION); + rc = -1; + goto error; + } + + pr_debug("success\n"); + + pr_debug("client version major(%d), minor(%d), patch(%d)\n", + HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION, + HDCP_CLIENT_PATCH_VERSION); + + pr_debug("app version major(%d), minor(%d), patch(%d)\n", + HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion), + HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion), + HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion)); +error: + return rc; +} + +static int hdcp2_app_tx_init(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(tx_init); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) { + pr_err("txmtr already initialized\n"); + goto error; + } + + req_buf->sessionid = handle->session_id; + + rc = hdcp2_app_process_cmd(tx_init); + if (rc) + goto error; + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; + + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_tx_init_legacy(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(tx_init_v1); + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) { + pr_err("txmtr already initialized\n"); + goto error; + } + + rc = hdcp2_app_process_cmd(tx_init_v1); + if (rc) + goto error; + + handle->app_data.response.data = rsp_buf->message; + handle->app_data.response.length = rsp_buf->msglen; + handle->app_data.timeout = rsp_buf->timeout; + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; + + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_load(struct hdcp2_handle *handle) +{ + int rc = 0; + + if (!handle) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_err("%s app already loaded\n", handle->app_name); + goto error; + } + + rc = qseecom_start_app(&handle->qseecom_handle, + handle->app_name, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed for HDCP2P2 (%d)\n", rc); + goto error; + } + + 
rc = qseecom_start_app(&handle->hdcpsrm_qseecom_handle, + HDCPSRM_APP_NAME, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed for HDCPSRM (%d)\n", rc); + goto hdcpsrm_error; + } + + pr_debug("qseecom_start_app success\n"); + + rc = hdcp_get_version(handle); + if (rc) { + pr_err("library get version failed\n"); + goto get_version_error; + } + + if (handle->legacy_app) { + handle->app_init = hdcp2_app_init_legacy; + handle->tx_init = hdcp2_app_tx_init_legacy; + } else { + handle->app_init = hdcp2_app_init; + handle->tx_init = hdcp2_app_tx_init; + } + + rc = handle->app_init(handle); + if (rc) { + pr_err("app init failed\n"); + goto get_version_error; + } + + handle->hdcp_state |= HDCP_STATE_APP_LOADED; + + return rc; +get_version_error: + qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); +hdcpsrm_error: + qseecom_shutdown_app(&handle->qseecom_handle); +error: + return rc; +} + +static int hdcp2_app_unload(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(deinit); + + hdcp2_app_process_cmd(deinit); + + /* deallocate the resources for qseecom HDCPSRM handle */ + rc = qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); + if (rc) + pr_err("qseecom_shutdown_app failed for HDCPSRM (%d)\n", rc); + + /* deallocate the resources for qseecom HDCP2P2 handle */ + rc = qseecom_shutdown_app(&handle->qseecom_handle); + if (rc) { + pr_err("qseecom_shutdown_app failed for HDCP2P2 (%d)\n", rc); + return rc; + } + + handle->hdcp_state &= ~HDCP_STATE_APP_LOADED; + pr_debug("%s app unloaded\n", handle->app_name); + + return rc; +error: + qseecom_shutdown_app(&handle->hdcpsrm_qseecom_handle); + return rc; +} + +static int hdcp2_verify_key(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(verify_key); + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", handle->app_name); + rc = -EINVAL; + goto error; + } + + rc = hdcp2_app_process_cmd(verify_key); + pr_debug("verify_key = %d\n", rc); + +error: + return rc; +} + +bool hdcp2_feature_supported(void *data) +{ + int rc = 0; + bool supported = false; + struct hdcp2_handle *handle = data; + + if (!handle) { + pr_err("invalid input\n"); + rc = -EINVAL; + goto error; + } + + if (handle->feature_supported) { + supported = true; + goto error; + } + + rc = hdcp2_app_load(handle); + if (!rc) { + if (!hdcp2_verify_key(handle)) { + pr_debug("HDCP 2.2 supported\n"); + handle->feature_supported = true; + supported = true; + } + hdcp2_app_unload(handle); + } +error: + return supported; +} +EXPORT_SYMBOL(hdcp2_feature_supported); + +static int hdcp2_app_session_init(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(session_init); + + if (!handle->qseecom_handle || !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", handle->app_name); + rc = -EINVAL; + goto error; + } + + if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) { + pr_err("session already initialized\n"); + goto error; + } + + req_buf->deviceid = handle->device_type; + + rc = hdcp2_app_process_cmd(session_init); + if (rc) + goto error; + + pr_debug("session id %d\n", rsp_buf->sessionid); + + handle->session_id = rsp_buf->sessionid; + handle->hdcp_state |= HDCP_STATE_SESSION_INIT; + + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_session_deinit(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(session_deinit); + + if 
(!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", handle->app_name); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->sessionid = handle->session_id; + + rc = hdcp2_app_process_cmd(session_deinit); + if (rc) + goto error; + + handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT; + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_tx_deinit(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(tx_deinit); + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", handle->app_name); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->ctxhandle = handle->tz_ctxhandle; + + rc = hdcp2_app_process_cmd(tx_deinit); + if (rc) + goto error; + + handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT; + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_start_auth(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(start_auth); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->ctxHandle = handle->tz_ctxhandle; + + rc = hdcp2_app_process_cmd(start_auth); + if (rc) + goto error; + + handle->app_data.response.data = rsp_buf->message; + handle->app_data.response.length = rsp_buf->msglen; + handle->app_data.timeout = rsp_buf->timeout; + handle->app_data.repeater_flag = false; + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + + pr_debug("success\n"); +error: + return rc; +} + +static int hdcp2_app_start(struct hdcp2_handle *handle) +{ + int rc = 0; + + rc = hdcp2_app_load(handle); + if (rc) + goto error; + + if (!handle->legacy_app) { + rc = hdcp2_app_session_init(handle); + if (rc) + goto error; + } + + if (handle->tx_init == NULL) { + pr_err("invalid txmtr init function pointer\n"); + rc = -EINVAL; + goto error; + } + + rc = handle->tx_init(handle); + +error: + return rc; +} + +static int hdcp2_app_stop(struct hdcp2_handle *handle) +{ + int rc = 0; + + rc = hdcp2_app_tx_deinit(handle); + if (rc) + goto end; + + if (!handle->legacy_app) { + rc = hdcp2_app_session_deinit(handle); + if (rc) + goto end; + } + + rc = hdcp2_app_unload(handle); +end: + return rc; +} + +static int hdcp2_app_process_msg(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(rcvd_msg); + + if (!handle->app_data.request.data) { + pr_err("invalid request buffer\n"); + rc = -EINVAL; + goto error; + } + + req_buf->msglen = handle->app_data.request.length; + req_buf->ctxhandle = handle->tz_ctxhandle; + + rc = hdcp2_app_process_cmd(rcvd_msg); + if (rc) + goto error; + + /* check if it's a repeater */ + if (rsp_buf->flag == HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) + handle->app_data.repeater_flag = true; + + handle->app_data.response.data = rsp_buf->msg; + handle->app_data.response.length = rsp_buf->msglen; + handle->app_data.timeout = rsp_buf->timeout; +error: + return rc; +} + +static int hdcp2_app_timeout(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(send_timeout); + + rc = hdcp2_app_process_cmd(send_timeout); + if (rc) + goto error; + + handle->app_data.response.data = rsp_buf->message; + 
handle->app_data.response.length = rsp_buf->msglen; + handle->app_data.timeout = rsp_buf->timeout; +error: + return rc; +} + +static int hdcp2_app_enable_encryption(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(set_hw_key); + + /* + * wait at least 200ms before enabling encryption + * as per hdcp2p2 specifications. + */ + msleep(SLEEP_SET_HW_KEY_MS); + + req_buf->ctxhandle = handle->tz_ctxhandle; + + rc = hdcp2_app_process_cmd(set_hw_key); + if (rc) + goto error; + + handle->hdcp_state |= HDCP_STATE_AUTHENTICATED; + + pr_debug("success\n"); + return rc; +error: + return rc; +} + +static int hdcp2_force_encryption_utility(struct hdcp2_handle *handle, + uint32_t enable) +{ + int rc = 0; + + hdcp2_app_init_var(force_encryption); + if (handle->hdcp_state == HDCP_STATE_AUTHENTICATED) + msleep(SLEEP_FORCE_ENCRYPTION_MS); + + req_buf->ctxhandle = handle->tz_ctxhandle; + req_buf->enable = enable; + + rc = hdcp2_app_process_cmd(force_encryption); + if (rc || (rsp_buf->commandid != hdcp_cmd_force_encryption)) + goto error; + + return 0; +error: + return rc; +} + +int hdcp2_force_encryption(void *ctx, uint32_t enable) +{ + int rc = 0; + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + rc = hdcp2_force_encryption_utility(handle, enable); + if (rc) + goto error; + + pr_debug("success\n"); + return 0; +error: + pr_err("failed, rc=%d\n", rc); + return rc; +} +EXPORT_SYMBOL(hdcp2_force_encryption); + +static int hdcp2_app_query_stream(struct hdcp2_handle *handle) +{ + int rc = 0; + + hdcp2_app_init_var(query_stream_type); + + req_buf->ctxhandle = handle->tz_ctxhandle; + + rc = hdcp2_app_process_cmd(query_stream_type); + if (rc) + goto error; + + handle->app_data.response.data = rsp_buf->msg; + handle->app_data.response.length = rsp_buf->msglen; + handle->app_data.timeout = rsp_buf->timeout; +error: + return rc; +} + +static unsigned char *hdcp2_get_recv_buf(struct hdcp2_handle *handle) +{ + struct hdcp_rcvd_msg_req *req_buf; + + req_buf = (struct hdcp_rcvd_msg_req *)(handle->qseecom_handle->sbuf); + return req_buf->msg; +} + +int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, + struct hdcp2_app_data *app_data) +{ + struct hdcp2_handle *handle = NULL; + int rc = 0; + + if (!ctx || !app_data) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + handle->app_data.request.length = app_data->request.length; + + pr_debug("command %s\n", hdcp2_app_cmd_str(cmd)); + + switch (cmd) { + case HDCP2_CMD_START: + rc = hdcp2_app_start(handle); + break; + case HDCP2_CMD_START_AUTH: + rc = hdcp2_app_start_auth(handle); + break; + case HDCP2_CMD_PROCESS_MSG: + rc = hdcp2_app_process_msg(handle); + break; + case HDCP2_CMD_TIMEOUT: + rc = hdcp2_app_timeout(handle); + break; + case HDCP2_CMD_EN_ENCRYPTION: + rc = hdcp2_app_enable_encryption(handle); + break; + case HDCP2_CMD_QUERY_STREAM: + rc = hdcp2_app_query_stream(handle); + break; + case HDCP2_CMD_STOP: + rc = hdcp2_app_stop(handle); + default: + goto error; + } + + if (rc) + goto error; + + handle->app_data.request.data = hdcp2_get_recv_buf(handle); + + app_data->request.data = handle->app_data.request.data; + app_data->request.length = handle->app_data.request.length; + app_data->response.data = handle->app_data.response.data; + app_data->response.length = handle->app_data.response.length; + app_data->timeout = handle->app_data.timeout; + app_data->repeater_flag = handle->app_data.repeater_flag; +error: + return rc; +} +EXPORT_SYMBOL(hdcp2_app_comm); + 
+static int hdcp2_open_stream_helper(struct hdcp2_handle *handle, + uint8_t vc_payload_id, + uint8_t stream_number, + uint32_t *stream_id) +{ + int rc = 0; + + hdcp2_app_init_var(session_open_stream); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->sessionid = handle->session_id; + req_buf->vcpayloadid = vc_payload_id; + req_buf->stream_number = stream_number; + req_buf->streamMediaType = 0; + + rc = hdcp2_app_process_cmd(session_open_stream); + if (rc) + goto error; + + *stream_id = rsp_buf->streamid; + + pr_debug("success\n"); + +error: + return rc; +} + +int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, uint8_t stream_number, + uint32_t *stream_id) +{ + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + + return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number, + stream_id); +} +EXPORT_SYMBOL(hdcp2_open_stream); + +static int hdcp2_close_stream_helper(struct hdcp2_handle *handle, + uint32_t stream_id) +{ + int rc = 0; + + hdcp2_app_init_var(session_close_stream); + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + rc = -EINVAL; + goto error; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + rc = -EINVAL; + goto error; + } + + req_buf->sessionid = handle->session_id; + req_buf->streamid = stream_id; + + rc = hdcp2_app_process_cmd(session_close_stream); + + if (rc) + goto error; + + pr_debug("success\n"); +error: + return rc; +} + +int hdcp2_close_stream(void *ctx, uint32_t stream_id) +{ + struct hdcp2_handle *handle = NULL; + + if (!ctx) { + pr_err("invalid input\n"); + return -EINVAL; + } + + handle = ctx; + + return hdcp2_close_stream_helper(handle, stream_id); +} +EXPORT_SYMBOL(hdcp2_close_stream); + +void *hdcp2_init(u32 device_type) +{ + struct hdcp2_handle *handle = NULL; + + handle = kzalloc(sizeof(struct hdcp2_handle), GFP_KERNEL); + if (!handle) + goto error; + + handle->device_type = device_type; + handle->app_name = HDCP2P2_APP_NAME; +error: + return handle; +} +EXPORT_SYMBOL(hdcp2_init); + +void hdcp2_deinit(void *ctx) +{ + kfree_sensitive(ctx); +} +EXPORT_SYMBOL(hdcp2_deinit); + +void *hdcp1_init(void) +{ + struct hdcp1_handle *handle = + kzalloc(sizeof(struct hdcp1_handle), GFP_KERNEL); + + if (!handle) + goto error; + + handle->app_name = HDCP1_APP_NAME; +error: + return handle; +} +EXPORT_SYMBOL(hdcp1_init); + +void hdcp1_deinit(void *data) +{ + kfree(data); +} +EXPORT_SYMBOL(hdcp1_deinit); + +static int hdcp1_count_ones(u8 *array, u8 len) +{ + int i, j, count = 0; + + for (i = 0; i < len; i++) + for (j = 0; j < 8; j++) + count += (((array[i] >> j) & 0x1) ? 
1 : 0); + return count; +} + +static int hdcp1_validate_aksv(u32 aksv_msb, u32 aksv_lsb) +{ + int const number_of_ones = 20; + u8 aksv[5]; + + pr_debug("AKSV=%02x%08x\n", aksv_msb, aksv_lsb); + + aksv[0] = aksv_lsb & 0xFF; + aksv[1] = (aksv_lsb >> 8) & 0xFF; + aksv[2] = (aksv_lsb >> 16) & 0xFF; + aksv[3] = (aksv_lsb >> 24) & 0xFF; + aksv[4] = aksv_msb & 0xFF; + + /* check there are 20 ones in AKSV */ + if (hdcp1_count_ones(aksv, 5) != number_of_ones) { + pr_err("AKSV bit count failed\n"); + return -EINVAL; + } + + return 0; +} + + +static int hdcp1_set_key(struct hdcp1_handle *hdcp1_handle, u32 *aksv_msb, + u32 *aksv_lsb) +{ + int rc = 0; + struct hdcp1_key_set_req *key_set_req; + struct hdcp1_key_set_rsp *key_set_rsp; + struct qseecom_handle *handle = NULL; + + if (aksv_msb == NULL || aksv_lsb == NULL) { + pr_err("invalid aksv\n"); + return -EINVAL; + } + + if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) { + pr_err("invalid HDCP 1.x handle\n"); + return -EINVAL; + } + + if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", hdcp1_handle->app_name); + return -EINVAL; + } + + handle = hdcp1_handle->qseecom_handle; + + /* set keys and request aksv */ + key_set_req = (struct hdcp1_key_set_req *)handle->sbuf; + key_set_req->commandid = HDCP1_SET_KEY; + key_set_rsp = (struct hdcp1_key_set_rsp *)(handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req))); + rc = qseecom_send_command(handle, key_set_req, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_set_req)), + key_set_rsp, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_set_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err=%d\n", rc); + return -ENOKEY; + } + + rc = key_set_rsp->ret; + if (rc) { + pr_err("set key cmd failed, rsp=%d\n", key_set_rsp->ret); + return -ENOKEY; + } + + /* copy bytes into msb and lsb */ + *aksv_msb = key_set_rsp->ksv[0] << 24 | key_set_rsp->ksv[1] << 16 | + key_set_rsp->ksv[2] << 8 | key_set_rsp->ksv[3]; + *aksv_lsb = key_set_rsp->ksv[4] << 24 | key_set_rsp->ksv[5] << 16 | + key_set_rsp->ksv[6] << 8 | key_set_rsp->ksv[7]; + + rc = hdcp1_validate_aksv(*aksv_msb, *aksv_lsb); + if (rc) { + pr_err("aksv validation failed (%d)\n", rc); + return rc; + } + + return 0; +} + +static int hdcp1_app_load(struct hdcp1_handle *handle) +{ + int rc = 0; + + if (!handle) { + pr_err("invalid handle\n"); + goto error; + } + + rc = qseecom_start_app(&handle->qseecom_handle, handle->app_name, + QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("%s app load failed (%d)\n", handle->app_name, rc); + goto error; + } + + rc = qseecom_start_app(&handle->hdcpops_handle, HDCP1OPS_APP_NAME, + QSEECOM_SBUFF_SIZE); + if (rc) { + pr_warn("%s app load failed (%d)\n", HDCP1OPS_APP_NAME, rc); + handle->hdcpops_handle = NULL; + } + + handle->hdcp_state |= HDCP_STATE_APP_LOADED; + pr_debug("%s app loaded\n", handle->app_name); + +error: + return rc; +} + +static void hdcp1_app_unload(struct hdcp1_handle *handle) +{ + int rc = 0; + + if (!handle || !handle->qseecom_handle) { + pr_err("invalid handle\n"); + return; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_warn("%s app not loaded\n", handle->app_name); + return; + } + + if (handle->hdcpops_handle) { + /* deallocate the resources for HDCP 1.x ops handle */ + rc = qseecom_shutdown_app(&handle->hdcpops_handle); + if (rc) + pr_warn("%s app unload failed (%d)\n", HDCP1OPS_APP_NAME, rc); + } + + /* deallocate the resources for qseecom HDCP 1.x handle */ + rc = qseecom_shutdown_app(&handle->qseecom_handle); + if (rc) { + pr_err("%s app unload failed 
(%d)\n", handle->app_name, rc); + return; + } + + handle->hdcp_state &= ~HDCP_STATE_APP_LOADED; + pr_debug("%s app unloaded\n", handle->app_name); +} + +static int hdcp1_verify_key(struct hdcp1_handle *hdcp1_handle) +{ + int rc = 0; + struct hdcp1_key_verify_req *key_verify_req; + struct hdcp1_key_verify_rsp *key_verify_rsp; + struct qseecom_handle *handle = NULL; + + if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) { + pr_err("invalid HDCP 1.x handle\n"); + return -EINVAL; + } + + if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", hdcp1_handle->app_name); + return -EINVAL; + } + + handle = hdcp1_handle->qseecom_handle; + + key_verify_req = (struct hdcp1_key_verify_req *)handle->sbuf; + key_verify_req->commandid = HDCP1_KEY_VERIFY; + key_verify_rsp = (struct hdcp1_key_verify_rsp *)(handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_key_verify_req))); + rc = qseecom_send_command(handle, key_verify_req, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_verify_req)), + key_verify_rsp, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_set_rsp))); + + if (rc < 0) { + pr_err("command HDCP1_KEY_VERIFY failed (%d)\n", rc); + return -EINVAL; + } + + rc = key_verify_rsp->ret; + if (rc) { + pr_err("key_verify failed, rsp=%d\n", key_verify_rsp->ret); + return -EINVAL; + } + + pr_debug("success\n"); + + return 0; +} + +bool hdcp1_feature_supported(void *data) +{ + bool supported = false; + struct hdcp1_handle *handle = data; + int rc = 0; + + if (!handle) { + pr_err("invalid handle\n"); + goto error; + } + + if (handle->feature_supported) { + supported = true; + goto error; + } + + rc = hdcp1_app_load(handle); + if (!rc && (handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + if (!hdcp1_verify_key(handle)) { + pr_debug("HDCP 1.x supported\n"); + handle->feature_supported = true; + supported = true; + } + hdcp1_app_unload(handle); + } +error: + return supported; +} +EXPORT_SYMBOL(hdcp1_feature_supported); + +int hdcp1_set_enc(void *data, bool enable) +{ + int rc = 0; + struct hdcp1_set_enc_req *set_enc_req; + struct hdcp1_set_enc_rsp *set_enc_rsp; + struct hdcp1_handle *hdcp1_handle = data; + struct qseecom_handle *handle = NULL; + + if (!hdcp1_handle || !hdcp1_handle->qseecom_handle) { + pr_err("invalid HDCP 1.x handle\n"); + return -EINVAL; + } + + if (!hdcp1_handle->feature_supported) { + pr_err("HDCP 1.x not supported\n"); + return -EINVAL; + } + + if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", hdcp1_handle->app_name); + return -EINVAL; + } + + handle = hdcp1_handle->qseecom_handle; + + /* set keys and request aksv */ + set_enc_req = (struct hdcp1_set_enc_req *)handle->sbuf; + set_enc_req->commandid = HDCP1_SET_ENC; + set_enc_req->enable = enable; + set_enc_rsp = (struct hdcp1_set_enc_rsp *)(handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req))); + rc = qseecom_send_command(handle, set_enc_req, + QSEECOM_ALIGN(sizeof + (struct hdcp1_set_enc_req)), + set_enc_rsp, + QSEECOM_ALIGN(sizeof + (struct hdcp1_set_enc_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err=%d\n", rc); + return -EINVAL; + } + + rc = set_enc_rsp->ret; + if (rc) { + pr_err("enc cmd failed, rsp=%d\n", set_enc_rsp->ret); + return -EINVAL; + } + + pr_debug("success\n"); + return 0; +} +EXPORT_SYMBOL(hdcp1_set_enc); + +int hdcp1_ops_notify(void *data, void *topo, bool is_authenticated) +{ + int rc = 0; + struct hdcp1_ops_notify_req *ops_notify_req; + struct hdcp1_ops_notify_rsp *ops_notify_rsp; + struct hdcp1_handle *hdcp1_handle = data; + struct 
qseecom_handle *handle = NULL; + struct hdcp1_topology *topology = NULL; + + if (!hdcp1_handle || !hdcp1_handle->hdcpops_handle) { + pr_err("invalid HDCP 1.x ops handle\n"); + return -EINVAL; + } + + if (!hdcp1_handle->feature_supported) { + pr_err("HDCP 1.x not supported\n"); + return -EINVAL; + } + + if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("%s app not loaded\n", HDCP1OPS_APP_NAME); + return -EINVAL; + } + + handle = hdcp1_handle->hdcpops_handle; + topology = (struct hdcp1_topology *)topo; + + /* set keys and request aksv */ + ops_notify_req = (struct hdcp1_ops_notify_req *)handle->sbuf; + ops_notify_req->commandid = HDCP1_NOTIFY_TOPOLOGY; + ops_notify_req->device_type = DEVICE_TYPE_DP; + ops_notify_req->is_authenticated = is_authenticated; + ops_notify_req->topology.depth = topology->depth; + ops_notify_req->topology.device_count = topology->device_count; + ops_notify_req->topology.max_devices_exceeded = topology->max_devices_exceeded; + ops_notify_req->topology.max_cascade_exceeded = topology->max_cascade_exceeded; + + /* + * For hdcp1.4 below two nodes are not applicable but as + * TZ ops ta talks with other drivers with same structure + * and want to maintain same interface across hdcp versions, + * we are setting the values to 0. + */ + ops_notify_req->topology.hdcp2LegacyDeviceDownstream = 0; + ops_notify_req->topology.hdcp1DeviceDownstream = 0; + + memset(ops_notify_req->recv_id_list, 0, sizeof(uint8_t) * MAX_REC_ID_LIST_SIZE); + + ops_notify_rsp = (struct hdcp1_ops_notify_rsp *)(handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_req))); + rc = qseecom_send_command(handle, ops_notify_req, + QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_req)), + ops_notify_rsp, + QSEECOM_ALIGN(sizeof(struct hdcp1_ops_notify_rsp))); + + rc = ops_notify_rsp->ret; + if (rc < 0) { + pr_warn("Ops notify cmd failed, rsp=%d\n", ops_notify_rsp->ret); + return -EINVAL; + } + + pr_debug("ops notify success\n"); + return 0; +} +EXPORT_SYMBOL(hdcp1_ops_notify); + +int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb) +{ + int rc = 0; + struct hdcp1_handle *hdcp1_handle = data; + + if (!aksv_msb || !aksv_lsb) { + pr_err("invalid aksv output buffer\n"); + rc = -EINVAL; + goto error; + } + + if (!hdcp1_handle) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto error; + } + + if (!hdcp1_handle->feature_supported) { + pr_err("feature not supported\n"); + rc = -EINVAL; + goto error; + } + + if (hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_debug("%s app already loaded\n", hdcp1_handle->app_name); + goto error; + } + + rc = hdcp1_app_load(hdcp1_handle); + if (rc) + goto error; + + rc = hdcp1_set_key(hdcp1_handle, aksv_msb, aksv_lsb); + if (rc) + goto key_error; + + pr_debug("success\n"); + return rc; + +key_error: + hdcp1_app_unload(hdcp1_handle); +error: + return rc; +} +EXPORT_SYMBOL(hdcp1_start); + +void hdcp1_stop(void *data) +{ + struct hdcp1_handle *hdcp1_handle = data; + + if (!hdcp1_handle || !hdcp1_handle->qseecom_handle || !hdcp1_handle->hdcpops_handle) { + pr_err("invalid handle\n"); + return; + } + + if (!(hdcp1_handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_debug("%s app not loaded\n", hdcp1_handle->app_name); + return; + } + + hdcp1_app_unload(hdcp1_handle); +} +EXPORT_SYMBOL(hdcp1_stop); + +static int __init hdcp_module_init(void){ return 0; } +static void __exit hdcp_module_exit(void){ return; } + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("HDCP driver"); + +module_init(hdcp_module_init); +module_exit(hdcp_module_exit); diff --git 
a/linux/hdcp_qseecom.h b/linux/hdcp_qseecom.h new file mode 100644 index 0000000000..bc5eef6acc --- /dev/null +++ b/linux/hdcp_qseecom.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2022, The Linux Foundation. All rights reserved. + */ + +#ifndef __HDCP_QSEECOM_H +#define __HDCP_QSEECOM_H +#include + +#define HDCP_QSEECOM_ENUM_STR(x) #x + +enum hdcp2_app_cmd { + HDCP2_CMD_START, + HDCP2_CMD_START_AUTH, + HDCP2_CMD_STOP, + HDCP2_CMD_PROCESS_MSG, + HDCP2_CMD_TIMEOUT, + HDCP2_CMD_EN_ENCRYPTION, + HDCP2_CMD_QUERY_STREAM, +}; + +struct hdcp2_buffer { + unsigned char *data; + u32 length; +}; + +struct hdcp2_app_data { + u32 timeout; + bool repeater_flag; + struct hdcp2_buffer request; // requests to TA, sent from sink + struct hdcp2_buffer response; // responses from TA, sent to sink +}; + +struct hdcp1_topology { + uint32_t depth; + uint32_t device_count; + uint32_t max_devices_exceeded; + uint32_t max_cascade_exceeded; + uint32_t hdcp2LegacyDeviceDownstream; + uint32_t hdcp1DeviceDownstream; +}; + +static inline const char *hdcp2_app_cmd_str(enum hdcp2_app_cmd cmd) +{ + switch (cmd) { + case HDCP2_CMD_START: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START); + case HDCP2_CMD_START_AUTH: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH); + case HDCP2_CMD_STOP: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP); + case HDCP2_CMD_PROCESS_MSG: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_PROCESS_MSG); + case HDCP2_CMD_TIMEOUT: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_TIMEOUT); + case HDCP2_CMD_EN_ENCRYPTION: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_EN_ENCRYPTION); + case HDCP2_CMD_QUERY_STREAM: + return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_QUERY_STREAM); + default: return "???"; + } +} + +#if IS_ENABLED(CONFIG_HDCP_QSEECOM) +void *hdcp1_init(void); +void hdcp1_deinit(void *data); +bool hdcp1_feature_supported(void *data); +int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb); +int hdcp1_set_enc(void *data, bool enable); +int hdcp1_ops_notify(void *data, void *topology, bool is_authenticated); +void hdcp1_stop(void *data); + +void *hdcp2_init(u32 device_type); +void hdcp2_deinit(void *ctx); +bool hdcp2_feature_supported(void *ctx); +int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, + struct hdcp2_app_data *app_data); +int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, + uint8_t stream_number, uint32_t *stream_id); +int hdcp2_close_stream(void *ctx, uint32_t stream_id); +int hdcp2_force_encryption(void *ctx, uint32_t enable); +#else +static inline void *hdcp1_init(void) +{ + return NULL; +} + +static inline void hdcp1_deinit(void *data) +{ +} + +static inline bool hdcp1_feature_supported(void *data) +{ + return false; +} + +static inline int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb) +{ + return 0; +} + +static inline int hdcp1_ops_notify(void *data, void *topology, bool is_authenticated) +{ + return 0; +} + +static inline int hdcp1_set_enc(void *data, bool enable) +{ + return 0; +} + +static inline void hdcp1_stop(void *data) +{ +} + +static inline void *hdcp2_init(u32 device_type) +{ + return NULL; +} + +static inline void hdcp2_deinit(void *ctx) +{ +} + +static inline bool hdcp2_feature_supported(void *ctx) +{ + return false; +} + +static inline int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, + struct hdcp2_app_data *app_data) +{ + return 0; +} + +static inline int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, + uint8_t stream_number, uint32_t *stream_id) +{ + return 0; +} + +static inline int hdcp2_close_stream(void *ctx, 
uint32_t stream_id) +{ + return 0; +} + +static inline int hdcp2_force_encryption(void *ctx, uint32_t enable) +{ + return 0; +} +#endif /* CONFIG_HDCP_QSEECOM */ + +#endif /* __HDCP_QSEECOM_H */ diff --git a/smcinvoke/misc/qseecom_kernel.h b/linux/misc/qseecom_kernel.h similarity index 100% rename from smcinvoke/misc/qseecom_kernel.h rename to linux/misc/qseecom_kernel.h diff --git a/crypto-qti/linux/qcota.h b/linux/qcota.h similarity index 100% rename from crypto-qti/linux/qcota.h rename to linux/qcota.h diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 3673f60204..556281513c 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -4,5 +4,7 @@ PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ + $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index 4c8f5f1e9d..841a8885b6 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -3,3 +3,5 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ + $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + From 72ec3a4a485438c824a8bf53ac166a2bfaadb8ac Mon Sep 17 00:00:00 2001 From: Sheik Anwar Shabic Y Date: Fri, 28 Jan 2022 19:35:48 +0530 Subject: [PATCH 007/202] securemsm-kernel: Enable tmecom module compilation Enable compilation for tmecom-intf_dlkm.ko compilation. Change-Id: I049c769abea1a53160d82f4c85ec49089f485f7e --- Android.mk | 10 + Kbuild | 3 + config/ssg_smcinvoke.conf | 3 +- linux/tme_hwkm_master.h | 120 ++++++++ linux/tme_hwkm_master_defs.h | 462 ++++++++++++++++++++++++++++++ securemsm_kernel_product_board.mk | 1 + securemsm_kernel_vendor_board.mk | 1 + tmecom/tme_hwkm_master.c | 404 ++++++++++++++++++++++++++ tmecom/tme_hwkm_master_intf.h | 132 +++++++++ tmecom/tmecom.c | 318 ++++++++++++++++++++ tmecom/tmecom.h | 12 + 11 files changed, 1465 insertions(+), 1 deletion(-) create mode 100644 linux/tme_hwkm_master.h create mode 100644 linux/tme_hwkm_master_defs.h create mode 100644 tmecom/tme_hwkm_master.c create mode 100644 tmecom/tme_hwkm_master_intf.h create mode 100644 tmecom/tmecom.c create mode 100644 tmecom/tmecom.h diff --git a/Android.mk b/Android.mk index cb8f0f94ea..4d9200d781 100644 --- a/Android.mk +++ b/Android.mk @@ -69,3 +69,13 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################# +################################################# +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := tmecom-intf_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := tmecom-intf_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# diff --git a/Kbuild b/Kbuild index f1b98ce19f..f51a3ec284 100644 --- a/Kbuild +++ b/Kbuild @@ -22,3 +22,6 @@ qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o + +obj-$(CONFIG_MSM_TMECOM_QMP) := tmecom-intf_dlkm.o +tmecom-intf_dlkm-objs := tmecom/tmecom.o tmecom/tme_hwkm_master.o 
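
The tmecom-intf_dlkm module wired up above exposes the TME Hardware Key Manager (HWKM) master interface declared in linux/tme_hwkm_master.h and linux/tme_hwkm_master_defs.h, which this patch adds below. A minimal, hypothetical in-kernel consumer could drive it as in the following sketch; the include paths, the chosen key-table slot, and the exact key-policy combination are illustrative assumptions and are not mandated anywhere in this patch.

/*
 * Hypothetical consumer sketch (not part of this patch): generate a
 * 256-bit AES-XTS key for the ICE slave, then clear the slot again.
 * Header paths, key_id and policy bits are assumptions for illustration.
 */
#include <linux/types.h>
#include <linux/printk.h>
#include "linux/tme_hwkm_master.h"       /* path assumed; depends on header export */
#include "linux/tme_hwkm_master_defs.h"

static int example_generate_ice_key(void)
{
	struct tme_ext_err_info err = {0};
	struct tme_key_policy pol = {0};
	uint32_t key_id = 16;	/* assumed free key-table slot */
	uint32_t ret;

	/* Low policy word: symmetric 256-bit key, encrypt/decrypt, AES-XTS,
	 * HW-managed, destined for ICE, owned by TZ, not provisioned/chip unique.
	 */
	pol.low = TME_KT_Symmetric | TME_KL_256 | TME_KP_Generic |
		  TME_KOP_Encryption | TME_KOP_Decryption |
		  TME_KAL_AES256_XTS | TME_KSL_HWManagedKey |
		  TME_KD_ICE | TME_KO_TZ | TME_KLI_NP_CU;
	/* High policy word: usable by TZ, current policy version. */
	pol.high = TME_KAU_TZ | TME_KPV_Version;

	ret = tme_hwkm_master_generatekey(key_id, &pol,
					  TME_CRED_SLOT_ID_NONE, &err);
	if (ret) {
		pr_err("generatekey failed: tme=0x%x seq=0x%x\n",
		       err.tme_err_status, err.seq_err_status);
		return -EIO;
	}

	/* ... program/use the key through the HWKM slave, then free the slot. */
	return tme_hwkm_master_clearkey(key_id, &err) ? -EIO : 0;
}

On failure a real caller would log the remaining extended error fields as well (seq_kp_err_status0/1 and seq_rsp_status), since the interface definitions below state that this debug information should be printed whenever any status field is non-zero.
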
diff --git a/config/ssg_smcinvoke.conf b/config/ssg_smcinvoke.conf index f64b0435db..4de3fd4c74 100644 --- a/config/ssg_smcinvoke.conf +++ b/config/ssg_smcinvoke.conf @@ -5,4 +5,5 @@ export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m export CONFIG_CRYPTO_DEV_QCRYPTO=m export CONFIG_SCSI_UFS_CRYPTO=m export CONFIG_SCSI_UFS_CRYPTO_QTI=m -export CONFIG_HDCP_QSEECOM=m \ No newline at end of file +export CONFIG_HDCP_QSEECOM=m +export CONFIG_MSM_TMECOM_QMP=m diff --git a/linux/tme_hwkm_master.h b/linux/tme_hwkm_master.h new file mode 100644 index 0000000000..90503b44fa --- /dev/null +++ b/linux/tme_hwkm_master.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ +#ifndef _TME_HWKM_MASTER_H_ +#define _TME_HWKM_MASTER_H_ + +#include + +/** + * API functions + */ + +/** + * Clear a Key Table entry. + * + * @param [in] key_id The ID of the key to clear. + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_clearkey(uint32_t key_id, + struct tme_ext_err_info *err_info); + +/** + * Generate a random key with an associated policy. + * + * @param [in] key_id The ID of the key to be generated. + * @param [in] policy The policy specifying the key to be generated. + * @param [in] cred_slot Credential slot to which this key will be bound. + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_generatekey(uint32_t key_id, + struct tme_key_policy *policy, + uint32_t cred_slot, + struct tme_ext_err_info *err_info); + +/** + * Derive a KEY using either HKDF or NIST algorithms. + * + * @param [in] key_id The ID of the key to be derived. + * @param [in] kdf_info Specifies how the key is to be derived + * and the properties of the derived key. + * @param [in] cred_slot Credential slot to which this key will be bound. + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_derivekey(uint32_t key_id, + struct tme_kdf_spec *kdf_info, + uint32_t cred_slot, + struct tme_ext_err_info *err_info); + +/** + * Wrap a key so that it can be safely moved outside the TME. + * + * @param [in] kwkey_id Denotes a key, already present in the + * Key Table, to be used to secure the target key. + * @param [in] targetkey_id Denotes the key to be wrapped. + * @param [in] cred_slot Credential slot to which this key is bound. + * @param [out] wrapped Buffer for wrapped key output from response + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_wrapkey(uint32_t key_id, + uint32_t targetkey_id, + uint32_t cred_slot, + struct tme_wrapped_key *wrapped, + struct tme_ext_err_info *err_info); + +/** + * Unwrap a key from outside the TME and store in the Key Table. + * + * @param [in] key_id The ID of the key to be unwrapped. + * @param [in] kwkey_id Denotes a key, already present in the + * Key Table, to be used to unwrap the key. + * @param [in] cred_slot Credential slot to which this key will be bound. + * @param [in] wrapped The key to be unwrapped. + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. 
+ */ +uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id, + uint32_t kwkey_id, + uint32_t cred_slot, + struct tme_wrapped_key *wrapped, + struct tme_ext_err_info *err_info); + +/** + * Import a plaintext key from outside the TME and store in the Key Table. + * + * @param [in] key_id The ID of the key to be imported. + * @param [in] policy The Key Policy to be associated with the key. + * @param [in] keyMaterial The plaintext key material. + * @param [in] cred_slot Credential slot to which this key will be bound. + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_importkey(uint32_t key_id, + struct tme_key_policy *policy, + struct tme_plaintext_key *key_material, + uint32_t cred_slot, + struct tme_ext_err_info *err_info); + +/** + * Broadcast Transport Key to HWKM slaves. + * + * @param [out] err_info Extended error info + * + * @return 0 if successful, error code otherwise. + */ +uint32_t tme_hwkm_master_broadcast_transportkey( + struct tme_ext_err_info *err_info); + +#endif /* _TME_HWKM_MASTER_H_ */ + diff --git a/linux/tme_hwkm_master_defs.h b/linux/tme_hwkm_master_defs.h new file mode 100644 index 0000000000..d6b1a8f5ac --- /dev/null +++ b/linux/tme_hwkm_master_defs.h @@ -0,0 +1,462 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ +#ifndef _TME_HWKM_MASTER_DEFS_H_ +#define _TME_HWKM_MASTER_DEFS_H_ + +#include + +#define UINT32_C(x) (x ## U) + +/** + * Key ID + */ +/* L1 Key IDs that are Key Table slot numbers */ +/**< CUS, 512 bits, in fuses */ +#define TME_KID_CHIP_UNIQUE_SEED 8 +/**< CRBK, 512 bits, in fuses */ +#define TME_KID_CHIP_RAND_BASE 9 +/**< L1 Key derived from L0 slot numbers 0-3 or 4-7 */ +#define TME_KID_CHIP_FAM_L1 10 + +/* Transport Key ID */ +#define TME_KID_TP 11/**< 528 bits, retained */ + +/** + * KeyPolicy + */ +/** Key Policy: 64-bit integer with bit encoded values */ +struct tme_key_policy { + uint32_t low; + uint32_t high; +} __packed; + +#define TME_KPHALFBITS 32 + +#define TME_KPCOMBINE(lo32, hi32) (((uint64_t)(lo32)) | \ + (((uint64_t)(hi32)) << TME_KPHALFBITS)) + +/** + * Fields in Key Policy low word + */ + +/** Key Type: Fundamental crypto algorithm groups */ +/**< Position of Key Type bits */ +#define TME_KT_Shift 0 +/**< Mask for Key Type bits */ +#define TME_KT_Mask (UINT32_C(0x07) << TME_KT_Shift) +/**< Symmetric algorithms */ +#define TME_KT_Symmetric (UINT32_C(0x00) << TME_KT_Shift) +/**< Asymmetric algorithms: ECC */ +#define TME_KT_Asymmetric_ECC (UINT32_C(0x01) << TME_KT_Shift) +/**< Asymmetric algorithms: RSA */ +#define TME_KT_Asymmetric_RSA (UINT32_C(0x05) << TME_KT_Shift) + +/** Key Length */ +/**< Position of Key Length bits */ +#define TME_KL_Shift 3 +/**< Mask for Key Length bits */ +#define TME_KL_Mask (UINT32_C(0x0F) << TME_KL_Shift) +/**< 64 bits - AES/2TDES */ +#define TME_KL_64 (UINT32_C(0x00) << TME_KL_Shift) +/**< 128 bits - AES/2TDES */ +#define TME_KL_128 (UINT32_C(0x01) << TME_KL_Shift) +/**< 192 bits - AES/3TDES */ +#define TME_KL_192 (UINT32_C(0x02) << TME_KL_Shift) +/**< 224 bits - ECDSA */ +#define TME_KL_224 (UINT32_C(0x03) << TME_KL_Shift) +/**< 256 bits - ECDSA/AES */ +#define TME_KL_256 (UINT32_C(0x04) << TME_KL_Shift) +/**< 384 bits - ECDSA */ +#define TME_KL_384 (UINT32_C(0x05) << TME_KL_Shift) +/**< 448 bits - ECDSA */ +#define TME_KL_448 (UINT32_C(0x06) << TME_KL_Shift) +/**< 512 bits - ECDSA/HMAC/KDF/AES-SIV/AES-XTS */ +#define TME_KL_512 (UINT32_C(0x07) << 
TME_KL_Shift) +/**< 521 bits - ECDSA/HMAC/KDF */ +#define TME_KL_521 (UINT32_C(0x08) << TME_KL_Shift) +/**< 2048 bits - RSA */ +#define TME_KL_2048 (UINT32_C(0x09) << TME_KL_Shift) +/**< 3072 bits - RSA */ +#define TME_KL_3072 (UINT32_C(0x0A) << TME_KL_Shift) +/**< 4096 bits - RSA */ +#define TME_KL_4096 (UINT32_C(0x0B) << TME_KL_Shift) + +/** + * Key Profile: Only applicable at present + * if Key Type is #TME_KT_Symmetric + */ +/**< Position of Key Profile bits */ +#define TME_KP_Shift 7 +/**< Mask for Key Class bits */ +#define TME_KP_Mask (UINT32_C(0x07) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KP_Generic (UINT32_C(0x00) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric (aka KDK) */ +#define TME_KP_KeyDerivation (UINT32_C(0x01) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric (aka KWK) */ +#define TME_KP_KWK_STORAGE (UINT32_C(0x02) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric (aka KSK) */ +#define TME_KP_KWK_SESSION (UINT32_C(0x03) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric (aka TPK) */ +#define TME_KP_KWK_TRANSPORT (UINT32_C(0x04) << TME_KP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KP_KWK_XPORT (UINT32_C(0x05) << TME_KP_Shift) +/**< If Key Type is not #TME_KT_Symmetric */ +#define TME_KP_Unused (UINT32_C(0x00) << TME_KP_Shift) + +/** Key Operation: Crypto operations permitted for a key */ +/**< Position of Key Operation bits */ +#define TME_KOP_Shift 10 +/**< Mask for Key Operation bits */ +#define TME_KOP_Mask (UINT32_C(0x0F) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_Encryption (UINT32_C(0x01) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_Decryption (UINT32_C(0x02) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_MAC (UINT32_C(0x04) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_NISTDerive (UINT32_C(0x04) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_HKDFExtract (UINT32_C(0x08) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KOP_HKDFExpand (UINT32_C(0x09) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_ECC */ +#define TME_KOP_ECDSASign (UINT32_C(0x01) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_ECC */ +#define TME_KOP_ECDSAVerify (UINT32_C(0x02) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_ECC */ +#define TME_KOP_ECDHSharedSecret (UINT32_C(0x04) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_RSA */ +#define TME_KOP_RSAASign (UINT32_C(0x01) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_RSA */ +#define TME_KOP_RSAAVerify (UINT32_C(0x02) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_RSA */ +#define TME_KOP_RSAEnc (UINT32_C(0x04) << TME_KOP_Shift) +/**< If Key Type is #TME_KT_Asymmetric_RSA */ +#define TME_KOP_RSADec (UINT32_C(0x08) << TME_KOP_Shift) + +/** Key Algorithm */ +/**< Position of Key Algorithm bits */ +#define TME_KAL_Shift 14 +/**< Mask for Key Algorithm bits */ +#define TME_KAL_Mask (UINT32_C(0x3F) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Symmetric */ +#define TME_KAL_AES128_ECB (UINT32_C(0x00) << TME_KAL_Shift) +#define TME_KAL_AES256_ECB (UINT32_C(0x01) << TME_KAL_Shift) +#define TME_KAL_DES_ECB (UINT32_C(0x02) << TME_KAL_Shift) +#define TME_KAL_TDES_ECB (UINT32_C(0x03) << TME_KAL_Shift) +#define TME_KAL_AES128_CBC (UINT32_C(0x04) << TME_KAL_Shift) +#define TME_KAL_AES256_CBC (UINT32_C(0x05) << TME_KAL_Shift) +#define TME_KAL_DES_CBC 
(UINT32_C(0x06) << TME_KAL_Shift) +#define TME_KAL_TDES_CBC (UINT32_C(0x07) << TME_KAL_Shift) +#define TME_KAL_AES128_CCM_TC (UINT32_C(0x08) << TME_KAL_Shift) +#define TME_KAL_AES128_CCM_NTC (UINT32_C(0x09) << TME_KAL_Shift) +#define TME_KAL_AES256_CCM_TC (UINT32_C(0x0A) << TME_KAL_Shift) +#define TME_KAL_AES256_CCM_NTC (UINT32_C(0x0B) << TME_KAL_Shift) +#define TME_KAL_AES256_SIV (UINT32_C(0x0C) << TME_KAL_Shift) +#define TME_KAL_AES128_CTR (UINT32_C(0x0D) << TME_KAL_Shift) +#define TME_KAL_AES256_CTR (UINT32_C(0x0E) << TME_KAL_Shift) +#define TME_KAL_AES128_XTS (UINT32_C(0x0F) << TME_KAL_Shift) +#define TME_KAL_AES256_XTS (UINT32_C(0x10) << TME_KAL_Shift) +#define TME_KAL_SHA1_HMAC (UINT32_C(0x11) << TME_KAL_Shift) +#define TME_KAL_SHA256_HMAC (UINT32_C(0x12) << TME_KAL_Shift) +#define TME_KAL_AES128_CMAC (UINT32_C(0x13) << TME_KAL_Shift) +#define TME_KAL_AES256_CMAC (UINT32_C(0x14) << TME_KAL_Shift) +#define TME_KAL_SHA384_HMAC (UINT32_C(0x15) << TME_KAL_Shift) +#define TME_KAL_SHA512_HMAC (UINT32_C(0x16) << TME_KAL_Shift) +#define TME_KAL_AES128_GCM (UINT32_C(0x17) << TME_KAL_Shift) +#define TME_KAL_AES256_GCM (UINT32_C(0x18) << TME_KAL_Shift) +#define TME_KAL_KASUMI (UINT32_C(0x19) << TME_KAL_Shift) +#define TME_KAL_SNOW3G (UINT32_C(0x1A) << TME_KAL_Shift) +#define TME_KAL_ZUC (UINT32_C(0x1B) << TME_KAL_Shift) +#define TME_KAL_PRINCE (UINT32_C(0x1C) << TME_KAL_Shift) +#define TME_KAL_SIPHASH (UINT32_C(0x1D) << TME_KAL_Shift) +#define TME_KAL_TDES_2KEY_CBC (UINT32_C(0x1E) << TME_KAL_Shift) +#define TME_KAL_TDES_2KEY_ECB (UINT32_C(0x1F) << TME_KAL_Shift) +#define TME_KAL_KDF_NIST (UINT32_C(0x20) << TME_KAL_Shift) +#define TME_KAL_KDF_HKDF (UINT32_C(0x21) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ +#define TME_KAL_ECC_ALGO_ECDSA (UINT32_C(0x00) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ +#define TME_KAL_ECC_ALGO_ECDH (UINT32_C(0x01) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ +#define TME_KAL_ECC_CURVE_NIST (UINT32_C(0x00) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ +#define TME_KAL_ECC_CURVE_BPOOL (UINT32_C(0x08) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */ +#define TME_KAL_DSA (UINT32_C(0x00) << TME_KAL_Shift) +/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */ +#define TME_KAL_DH (UINT32_C(0x01) << TME_KAL_Shift) + +/** Key Security Level */ +/**< Position of Key Security Level bits */ +#define TME_KSL_Shift 20 +/**< Mask for Key Security Level bits */ +#define TME_KSL_Mask (UINT32_C(0x03) << TME_KSL_Shift) +/**< Software Key */ +#define TME_KSL_SWKey (UINT32_C(0x00) << TME_KSL_Shift) +/**< Hardware Managed Key */ +#define TME_KSL_HWManagedKey (UINT32_C(0x01) << TME_KSL_Shift) +/**< Hardware Key */ +#define TME_KSL_HWKey (UINT32_C(0x02) << TME_KSL_Shift) + +/** Key Destination */ +/**< Position of Key Destination bits */ +#define TME_KD_Shift 22 +/**< Mask for Key Destination bits */ +#define TME_KD_Mask (UINT32_C(0x0F) << TME_KD_Shift) +/**< Master */ +#define TME_KD_TME_HW (UINT32_C(0x01) << TME_KD_Shift) +/**< ICE Slave */ +#define TME_KD_ICE (UINT32_C(0x02) << TME_KD_Shift) +/**< GPCE Slave */ +#define TME_KD_GPCE (UINT32_C(0x04) << TME_KD_Shift) +/**< Modem CE Slave */ +#define TME_KD_MDM_CE (UINT32_C(0x08) << TME_KD_Shift) + +/** Key Owner */ +/**< Position of Key Owner bits */ +#define TME_KO_Shift 26 +/**< Mask for Key Owner bits */ +#define TME_KO_Mask (UINT32_C(0x0F) << 
TME_KO_Shift) +/**< TME Hardware */ +#define TME_KO_TME_HW (UINT32_C(0x00) << TME_KO_Shift) +/**< TME Firmware */ +#define TME_KO_TME_FW (UINT32_C(0x01) << TME_KO_Shift) +/**< TZ (= APPS-S) */ +#define TME_KO_TZ (UINT32_C(0x02) << TME_KO_Shift) +/**< HLOS / HYP (= APPS-NS) */ +#define TME_KO_HLOS_HYP (UINT32_C(0x03) << TME_KO_Shift) +/**< Modem */ +#define TME_KO_MDM (UINT32_C(0x04) << TME_KO_Shift) +/**< SPU */ +#define TME_KO_SPU (UINT32_C(0x0F) << TME_KO_Shift) + +/** Key Lineage */ +/**< Position of Key Lineage bits */ +#define TME_KLI_Shift 30 +/**< Mask for Key Lineage bits */ +#define TME_KLI_Mask (UINT32_C(0x03) << TME_KLI_Shift) +/**< Not applicable */ +#define TME_KLI_NA (UINT32_C(0x00) << TME_KLI_Shift) +/**< Not provisioned, chip unique */ +#define TME_KLI_NP_CU (UINT32_C(0x01) << TME_KLI_Shift) +/**< Provisioned, not chip unique */ +#define TME_KLI_P_NCU (UINT32_C(0x02) << TME_KLI_Shift) +/**< Provisioned, chip unique */ +#define TME_KLI_P_CU (UINT32_C(0x03) << TME_KLI_Shift) + +/** + * Fields in Key Policy high word * + */ + +/** Reserved Bits, Group 1 */ +/**< Position of Reserved bits */ +#define TME_KR1_Shift (32 - TME_KPHALFBITS) +/**< Mask for Reserved bits */ +#define TME_KR1_Mask (UINT32_C(0x01) << TME_KR1_Shift) + +/** Key Wrapping Constraints */ +/**< Position of Key Attribute bits */ +#define TME_KWC_Shift (33 - TME_KPHALFBITS) +/**< Mask for Key Attribute bits */ +#define TME_KWC_Mask (UINT32_C(0x0F) << TME_KWC_Shift) +/**< Key is wrappable with KWK_EXPORT */ +#define TME_KWC_Wrappable_KXP (UINT32_C(0x01) << TME_KWC_Shift) +/**< Key is wrappable with KWK_STORAGE */ +#define TME_KWC_Wrappable_KWK (UINT32_C(0x02) << TME_KWC_Shift) +/**< Key is wrappable with KWK_TRANSPORT */ +#define TME_KWC_Wrappable_KTP (UINT32_C(0x04) << TME_KWC_Shift) +/**< Key is wrappable with KWK_SESSION */ +#define TME_KWC_Wrappable_KSK (UINT32_C(0x08) << TME_KWC_Shift) + +/** Throttling */ +/**< Position of Throttling bits */ +#define TME_KTH_Shift (37 - TME_KPHALFBITS) +/**< Mask for Throttling bits */ +#define TME_KTH_Mask (UINT32_C(0x01) << TME_KTH_Shift) +/**< Throttling enabled */ +#define TME_KTH_Enabled (UINT32_C(0x01) << TME_KTH_Shift) + +/** Reserved Bits, Group 2 */ +/**< Position of Reserved bits */ +#define TME_KR2_Shift (38 - TME_KPHALFBITS) +/**< Mask for Reserved bits */ +#define TME_KR2_Mask (UINT32_C(0x3F) << TME_KR2_Shift) + +/** Key Policy Version */ +/**< Position of Key Policy Version bits */ +#define TME_KPV_Shift (44 - TME_KPHALFBITS) +/**< Mask for Key Policy Version bits */ +#define TME_KPV_Mask (UINT32_C(0x0F) << TME_KPV_Shift) +/**< Mask for Key Policy Version bits */ +#define TME_KPV_Version (UINT32_C(0x03) << TME_KPV_Shift) + +/** Key Authorised Users */ +/**< Position of Authorised User bits */ +#define TME_KAU_Shift (48 - TME_KPHALFBITS) +/**< Mask for Authorised User bits */ +#define TME_KAU_Mask (UINT32_C(0xFF) << TME_KAU_Shift) +/**< Key usable by TME Hardware */ +#define TME_KAU_TME_HW (UINT32_C(0x01) << TME_KAU_Shift) +/**< Key usable by TME Firmware */ +#define TME_KAU_TME_FW (UINT32_C(0x02) << TME_KAU_Shift) +/**< Key usable by TZ (= APPS_S) */ +#define TME_KAU_TZ (UINT32_C(0x04) << TME_KAU_Shift) +/**< Key usable by HLOS / HYP (= APPS_NS) */ +#define TME_KAU_HLOS_HYP (UINT32_C(0x08) << TME_KAU_Shift) +/**< Key usable by Modem */ +#define TME_KAU_MDM (UINT32_C(0x10) << TME_KAU_Shift) +/**< Key usable by SPU */ +#define TME_KAU_SPU (UINT32_C(0x20) << TME_KAU_Shift) +/**< Key usable by all EEs */ +#define TME_KAU_ALL TME_KAU_Mask + +/** + * Credentials for 
throttling + */ +#define TME_CRED_SLOT_ID_NONE 0 /**< No throttling */ +#define TME_CRED_SLOT_ID_1 1 /**< Credential slot 1 */ +#define TME_CRED_SLOT_ID_2 2 /**< Credential slot 2 */ + +/** + * KDFSpec and associated structures + */ +/** Maximum context size that can be sent to the TME, in bytes */ +#define TME_KDF_SW_CONTEXT_BYTES_MAX 128 +#define TME_KDF_SALT_LABEL_BYTES_MAX 64 + +/** + * Security info to be appended to a KDF context by the Sequencer + * + * These fields allow keys to be tied to specific devices, states, + * OEMs, subsystems, etc. + * Values are obtained by the Sequencer from hardware, such as + * fuses or internal registers. + */ +#define TME_KSC_SOCTestSignState 0x00000001 /**< (32 bits) */ +#define TME_KSC_SOCSecBootState 0x00000002 /**< (8 bits) */ +#define TME_KSC_SOCDebugState 0x00000004 /**< (8 bits) */ +#define TME_KSC_TMELifecycleState 0x00000008 /**< (8 bits) */ +#define TME_KSC_BootStageOTP 0x00000010 /**< (8 bits) */ +#define TME_KSC_SWContext 0x00000020 /**< (variable) */ +#define TME_KSC_ChildKeyPolicy 0x00000040 /**< (64 bits) */ +#define TME_KSC_MixingKey 0x00000080 /**< (key len) */ +#define TME_KSC_ChipUniqueID 0x00000100 /**< (64 bits) */ +#define TME_KSC_ChipDeviceNumber 0x00000200 /**< (32 bits) */ +#define TME_KSC_TMEPatchVer 0x00000400 /**< (512 bits) */ +#define TME_KSC_SOCPatchVer 0x00000800 /**< (512 bits) */ +#define TME_KSC_OEMID 0x00001000 /**< (16 bits) */ +#define TME_KSC_OEMProductID 0x00002000 /**< (16 bits) */ +#define TME_KSC_TMEImgSecVer 0x00004000 /**< (512 bits) */ +#define TME_KSC_SOCInitImgSecVer 0x00008000 /**< (512 bits) */ +#define TME_KSC_OEMMRCHash 0x00010000 /**< (512 bits) */ +#define TME_KSC_OEMProductSeed 0x00020000 /**< (128 bits) */ +#define TME_KSC_SeqPatchVer 0x00040000 /**< (512 bits) */ +#define TME_KSC_HWMeasurement1 0x00080000 /**< (512 bits) */ +#define TME_KSC_HWMeasurement2 0x00100000 /**< (512 bits) */ +#define TME_KSC_Reserved 0xFFE00000 /**< RFU */ + +/** KDF Specification: encompasses both HKDF and NIST KDF algorithms */ +struct tme_kdf_spec { + /* Info common to HKDF and NIST algorithms */ + /**< @c TME_KAL_KDF_HKDF or @c TME_KAL_KDF_NIST */ + uint32_t kdfalgo; + /**< IKM for HKDF; IKS for NIST */ + uint32_t inputkey; + /**< If @c TME_KSC_MixingKey set in Security Context */ + uint32_t mixkey; + /**< If deriving a L3 key */ + uint32_t l2key; + /**< Derived key policy */ + struct tme_key_policy policy; + /**< Software provided context */ + uint8_t swcontext[TME_KDF_SW_CONTEXT_BYTES_MAX]; + /**< Length of @c swContext in bytes */ + uint32_t swcontextLength; + /**< Info to be appended to @c swContext */ + uint32_t security_context; + /**< Salt for HKDF; Label for NIST */ + uint8_t salt_label[TME_KDF_SALT_LABEL_BYTES_MAX]; + /**< Length of @c saltLabel in bytes */ + uint32_t salt_labelLength; + /* Additional info specific to HKDF: kdfAlgo == @c KAL_KDF_HKDF */ + /**< PRF Digest algorithm: @c KAL_SHA256_HMAC or @c KAL_SHA512_HMAC */ + uint32_t prf_digest_algo; +} __packed; + +/** + * WrappedKey and associated structures + */ +/* Maximum wrapped key context size, in bytes */ +/**< Cipher Text 68B, MAC 16B, KeyPolicy 8B, Nonce 8B */ +#define TME_WK_CONTEXT_BYTES_MAX 100 +struct tme_wrapped_key { + /**< Wrapped key context */ + uint8_t key[TME_WK_CONTEXT_BYTES_MAX]; + /**< Length of @c key in bytes*/ + uint32_t length; +} __packed; + +/** + * Plain text Key and associated structures + */ +/* Maximum plain text key size, in bytes */ +#define TME_PT_KEY_BYTES_MAX 68 + +/** + * Key format for intrinsically word 
aligned key + * lengths like 128/256/384/512... bits. + * + * Example: 256-bit key integer representation, + * Key = 0xK31 K30 K29.......K0 + * Byte array, key[] = {0xK31, 0xK30, 0xK29, ...., 0xK0} + * + * + * Key format for non-word aligned key lengths like 521 bits. + * The key length is rounded off to next word ie, 544 bits. + * + * Example: 521-bit key, Key = 0xK65 K64 K63.......K2 K1 K0 + * [bits 1-7 of K0 is expected to be zeros] + * 544 bit integer representation, Key = 0xK65 K64 K63.......K2 K1 K0 00 00 + * Byte array, key[] = {0xK65, 0xK64, 0xK63, ...., 0xK2, 0xK1, 0xK0, 0x00, 0x00} + * + */ +struct tme_plaintext_key { + /**< Plain text key */ + uint8_t key[TME_PT_KEY_BYTES_MAX]; + /**< Length of @c key in bytes */ + uint32_t length; +} __packed; + +/** + * Extended Error Information structure + */ +struct tme_ext_err_info { + /* TME FW */ + /**< TME FW Response status. */ + uint32_t tme_err_status; + + /* SEQ FW */ + /**< Contents of CSR_CMD_ERROR_STATUS */ + uint32_t seq_err_status; + + /* SEQ HW Key Policy */ + /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */ + uint32_t seq_kp_err_status0; + /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */ + uint32_t seq_kp_err_status1; + + /** + * Debug information: log/print this information + * if any of the above fields is non-zero + */ + /**< Contents of CSR_CMD_RESPONSE_STATUS */ + uint32_t seq_rsp_status; +} __packed; + +#endif /* _TME_HWKM_MASTER_DEFS_H_ */ + diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 556281513c..e43f1db9a0 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -5,6 +5,7 @@ PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + $(KERNEL_MODULES_OUT)/tmecom-intf_dlkm.ko \ diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index 841a8885b6..8e80187b18 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -4,4 +4,5 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + $(KERNEL_MODULES_OUT)/tmecom-intf_dlkm.ko \ diff --git a/tmecom/tme_hwkm_master.c b/tmecom/tme_hwkm_master.c new file mode 100644 index 0000000000..28aa03744b --- /dev/null +++ b/tmecom/tme_hwkm_master.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "tme_hwkm_master_intf.h" +#include "tmecom.h" + +#define TME_MSG_CBOR_TAG_HWKM (303) + +#define TME_CLEAR_KEY_CBOR_TAG 0x482F01D9 /* _be32 0xD9012F48 */ +#define TME_DERIVE_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ +#define TME_GENERATE_KEY_CBOR_TAG 0x542F01D9 /* _be32 0xD9012F54 */ +#define TME_IMPORT_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ +#define TME_WRAP_KEY_CBOR_TAG 0x502F01D9 /* _be32 0xD9012F50 */ +#define TME_UNWRAP_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ +#define TME_BORADCAST_KEY_CBOR_TAG 0x442F01D9 /* _be32 0xD9012F44 */ + +/* + * Static alloc for wrapped key + * Protected by tmecom dev mutex + */ +static struct wrap_key_resp gwrpk_response = {0}; + +static inline uint32_t update_ext_err( + struct tme_ext_err_info *err_info, + struct tme_response_sts *result) +{ + bool is_failure = false; + + err_info->tme_err_status = result->tme_err_status; + err_info->seq_err_status = result->seq_err_status; + err_info->seq_kp_err_status0 = result->seq_kp_err_status0; + err_info->seq_kp_err_status1 = result->seq_kp_err_status1; + err_info->seq_rsp_status = result->seq_rsp_status; + + is_failure = err_info->tme_err_status || + err_info->seq_err_status || + err_info->seq_kp_err_status0 || + err_info->seq_kp_err_status1; + + print_hex_dump_bytes("err_info decoded bytes : ", + DUMP_PREFIX_ADDRESS, (void *)err_info, + sizeof(*err_info)); + + return is_failure ? 1 : 0; +} + +uint32_t tme_hwkm_master_clearkey(uint32_t key_id, + struct tme_ext_err_info *err_info) +{ + struct clear_key_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!err_info) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cmd.code = TME_HWKM_CMD_CLEAR_KEY; + request->key_id = key_id; + request->cbor_header = TME_CLEAR_KEY_CBOR_TAG; + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM clear key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_clearkey); + +uint32_t tme_hwkm_master_generatekey(uint32_t key_id, + struct tme_key_policy *policy, + uint32_t cred_slot, + struct tme_ext_err_info *err_info) +{ + struct gen_key_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!err_info || !policy) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cmd.code = TME_HWKM_CMD_GENERATE_KEY; + request->key_id = key_id; + request->cred_slot = cred_slot; + request->cbor_header = TME_GENERATE_KEY_CBOR_TAG; + memcpy(&request->key_policy, policy, sizeof(*policy)); + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM generate key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + 
pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_generatekey); + +uint32_t tme_hwkm_master_derivekey(uint32_t key_id, + struct tme_kdf_spec *kdf_info, + uint32_t cred_slot, + struct tme_ext_err_info *err_info) +{ + struct derive_key_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!kdf_info || !err_info) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cmd.code = TME_HWKM_CMD_DERIVE_KEY; + request->key_id = key_id; + request->cred_slot = cred_slot; + request->cbor_header = TME_DERIVE_KEY_CBOR_TAG; + memcpy(&request->kdf_info, kdf_info, sizeof(*kdf_info)); + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM derive key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_derivekey); + +uint32_t tme_hwkm_master_wrapkey(uint32_t key_id, + uint32_t targetkey_id, + uint32_t cred_slot, + struct tme_wrapped_key *wrapped, + struct tme_ext_err_info *err_info) +{ + struct wrap_key_req *request = NULL; + struct wrap_key_resp *wrpk_response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*wrpk_response); + + if (!wrapped || !err_info) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + wrpk_response = &gwrpk_response; + + if (!request) + return -ENOMEM; + + request->cmd.code = TME_HWKM_CMD_WRAP_KEY; + request->key_id = key_id; + request->target_key_id = targetkey_id; + request->cbor_header = TME_WRAP_KEY_CBOR_TAG; + + ret = tmecom_process_request(request, sizeof(*request), wrpk_response, + &response_len); + + if (ret != 0) { + pr_err("HWKM wrap key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*wrpk_response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(wrpk_response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, &wrpk_response->status); + + if (!ret) + memcpy(wrapped, &wrpk_response->wrapped_key, sizeof(*wrapped)); + +err_exit: + kfree(request); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_wrapkey); + +uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id, + uint32_t kwkey_id, + uint32_t cred_slot, + struct tme_wrapped_key *wrapped, + struct tme_ext_err_info *err_info) +{ + struct unwrap_key_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!wrapped || !err_info) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cmd.code = TME_HWKM_CMD_UNWRAP_KEY; + request->key_id = key_id; + request->kw_key_id = kwkey_id; + request->cbor_header = TME_UNWRAP_KEY_CBOR_TAG; + 
memcpy(&request->wrapped, wrapped, sizeof(*wrapped)); + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM unwrap key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_unwrapkey); + +uint32_t tme_hwkm_master_importkey(uint32_t key_id, + struct tme_key_policy *policy, + struct tme_plaintext_key *key_material, + uint32_t cred_slot, + struct tme_ext_err_info *err_info) +{ + struct import_key_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!key_material || !err_info || !policy) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cmd.code = TME_HWKM_CMD_IMPORT_KEY; + request->key_id = key_id; + request->cred_slot = cred_slot; + request->cbor_header = TME_IMPORT_KEY_CBOR_TAG; + memcpy(&request->key_policy, policy, sizeof(*policy)); + memcpy(&request->key_material, key_material, sizeof(*key_material)); + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM import key request failed for %d\n", key_id); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_importkey); + +uint32_t tme_hwkm_master_broadcast_transportkey( + struct tme_ext_err_info *err_info) +{ + struct broadcast_tpkey_req *request = NULL; + struct tme_response_sts *response = NULL; + uint32_t ret = 0; + size_t response_len = sizeof(*response); + + if (!err_info) + return -EINVAL; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + response = kzalloc(response_len, GFP_KERNEL); + + if (!request || !response) { + ret = -ENOMEM; + goto err_exit; + } + + request->cbor_header = TME_BORADCAST_KEY_CBOR_TAG; + request->cmd.code = TME_HWKM_CMD_BROADCAST_TP_KEY; + + ret = tmecom_process_request(request, sizeof(*request), response, + &response_len); + + if (ret != 0) { + pr_err("HWKM broadcast TP key request failed\n"); + goto err_exit; + } + + if (response_len != sizeof(*response)) { + pr_err("HWKM response failed with invalid length: %u, %u\n", + response_len, sizeof(response)); + ret = -EBADMSG; + goto err_exit; + } + + ret = update_ext_err(err_info, response); + +err_exit: + kfree(request); + kfree(response); + return ret; +} +EXPORT_SYMBOL(tme_hwkm_master_broadcast_transportkey); + diff --git a/tmecom/tme_hwkm_master_intf.h b/tmecom/tme_hwkm_master_intf.h new file mode 100644 index 0000000000..f847f68d48 --- /dev/null +++ b/tmecom/tme_hwkm_master_intf.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ +#ifndef _TME_HWKM_MASTER_INTERFACE_H_ +#define _TME_HWKM_MASTER_INTERFACE_H_ + +#include + +/** + * HWKM Master command IDs + */ +enum tme_hwkm_cmd { + TME_HWKM_CMD_CLEAR_KEY = 0, /**< Clear Key */ + TME_HWKM_CMD_GENERATE_KEY = 1, /**< Generate Key */ + TME_HWKM_CMD_DERIVE_KEY = 2, /**< Derive Key, NIST or HKDF */ + TME_HWKM_CMD_WRAP_KEY = 3, /**< Wrap Key */ + TME_HWKM_CMD_UNWRAP_KEY = 4, /**< Unwrap Key */ + TME_HWKM_CMD_IMPORT_KEY = 5, /**< Import Key */ + TME_HWKM_CMD_BROADCAST_TP_KEY = 6, /**< Broadcast Transport Key */ + TMW_HWKM_CMD_INVALID = 7, /**< Invalid cmd */ +}; + +/** + * Opcode and response structures + */ + +/* Values as per TME_HWKM_CMD_* */ +struct tme_hwkm_master_cmd { + uint32_t code; +} __packed; + + +struct tme_response_sts { + /* TME FW */ + uint32_t tme_err_status; /**< TME FW Response status. */ + + /* SEQ FW */ + uint32_t seq_err_status; /**< Contents of CSR_CMD_ERROR_STATUS */ + + /* SEQ HW Key Policy */ + uint32_t seq_kp_err_status0; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */ + uint32_t seq_kp_err_status1; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */ + + /* Debug information: log/print this information if any of the above fields is non-zero */ + uint32_t seq_rsp_status; /**< Contents of CSR_CMD_RESPONSE_STATUS */ + +} __packed; + +/** + * Clear Key ID structures + */ +struct clear_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_CLEAR_KEY */ + uint32_t key_id; /**< The ID of the key to clear.*/ +} __packed; + +/** + * Generate Key ID structures + */ +struct gen_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_GENERATE_KEY */ + uint32_t key_id; /**< The ID of the key to be generated. */ + struct tme_key_policy key_policy;/**< The policy specifying the key to be generated. */ + uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ +} __packed; + +/** + * Derive Key ID structures + */ +struct derive_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_DERIVE_KEY */ + uint32_t key_id; /**< The ID of the key to be derived. */ + struct tme_kdf_spec kdf_info; /**< Specifies how the key is to be derived. */ + uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ +} __packed; + +/** + * Wrap Key ID structures + */ +struct wrap_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_WRAP_KEY */ + uint32_t key_id; /**< The ID of the key to secure the target key. */ + uint32_t target_key_id; /**< Denotes the key to be wrapped. */ + uint32_t cred_slot; /**< Credential slot to which this key is bound. */ +} __packed; + + +struct wrap_key_resp { + struct tme_response_sts status; /**< Response status. */ + struct tme_wrapped_key wrapped_key; /**< The wrapped key. */ +} __packed; + +/** + * Unwrap Key ID structures + */ +struct unwrap_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_UNWRAP_KEY */ + uint32_t key_id; /**< The ID of the key to be unwrapped. */ + uint32_t kw_key_id; /**< The ID of the key to be used to unwrap the key. */ + struct tme_wrapped_key wrapped; /**< The key to be unwrapped. */ + uint32_t cred_slot; /**< Credential slot to which this key will be bound. 
*/ +} __packed; + +/** + * Import Key ID structures + */ +struct import_key_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_IMPORT_KEY */ + uint32_t key_id; /**< The ID of the key to be imported. */ + struct tme_key_policy key_policy;/**< The Key Policy to be associated with the key. */ + struct tme_plaintext_key key_material;/**< The plain-text key material. */ + uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ +} __packed; + +/** + * Broadcast Transport Key structures + */ +struct broadcast_tpkey_req { + uint32_t cbor_header; /**< CBOR encoded tag */ + struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_BROADCAST_TP_KEY */ +} __packed; + + +#endif /* _TME_HWKM_MASTER_INTERFACE_H_ */ + diff --git a/tmecom/tmecom.c b/tmecom/tmecom.c new file mode 100644 index 0000000000..b8b694a180 --- /dev/null +++ b/tmecom/tmecom.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tmecom.h" + +struct tmecom { + struct device *dev; + struct mbox_client cl; + struct mbox_chan *chan; + struct mutex lock; + struct qmp_pkt pkt; + wait_queue_head_t waitq; + void *txbuf; + bool rx_done; +}; + +#if IS_ENABLED(CONFIG_DEBUG_FS) +#include +#include + +char dpkt[MBOX_MAX_MSG_LEN + 1]; +struct dentry *debugfs_file; +#endif /* CONFIG_DEBUG_FS */ + +static struct tmecom *tmedev; + +/** + * tmecom_msg_hdr - Request/Response message header between HLOS and TME. + * + * This header is proceeding any request specific parameters. + * The transaction id is used to match request with response. + * + * Note: glink/QMP layer provides the rx/tx data size, so user payload size + * is calculated by reducing the header size. 
+ */ +struct tmecom_msg_hdr { + unsigned int reserved; /* for future use */ + unsigned int txnid; /* transaction id */ +} __packed; +#define TMECOM_TX_HDR_SIZE sizeof(struct tmecom_msg_hdr) +#define CBOR_NUM_BYTES (sizeof(unsigned int)) +#define TMECOM_RX_HDR_SIZE (TMECOM_TX_HDR_SIZE + CBOR_NUM_BYTES) + +/* + * CBOR encode emulation + * Prepend tmecom_msg_hdr space + * CBOR tag is prepended in request + */ +static inline size_t tmecom_encode(struct tmecom *tdev, const void *reqbuf, + size_t size) +{ + unsigned int *msg = tdev->txbuf + TMECOM_TX_HDR_SIZE; + unsigned int *src = (unsigned int *)reqbuf; + + memcpy(msg, src, size); + return (size + TMECOM_TX_HDR_SIZE); +} + +/* + * CBOR decode emulation + * Strip tmecom_msg_hdr & CBOR tag + */ +static inline size_t tmecom_decode(struct tmecom *tdev, void *respbuf) +{ + unsigned int *msg = tdev->pkt.data + TMECOM_RX_HDR_SIZE; + unsigned int *rbuf = (unsigned int *)respbuf; + + memcpy(rbuf, msg, (tdev->pkt.size - TMECOM_RX_HDR_SIZE)); + return (tdev->pkt.size - TMECOM_RX_HDR_SIZE); +} + +static bool tmecom_check_rx_done(struct tmecom *tdev) +{ + return tdev->rx_done; +} + +int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf, + size_t *respsize) +{ + struct tmecom *tdev = tmedev; + long time_left = 0; + int ret = 0; + + /* + * Check to handle if probe is not successful or not completed yet + */ + if (!tdev) { + pr_err("%s: tmecom dev is NULL\n", __func__); + return -ENODEV; + } + + if (!reqbuf || !reqsize || (reqsize > MBOX_MAX_MSG_LEN)) { + dev_err(tdev->dev, "invalid reqbuf or reqsize\n"); + return -EINVAL; + } + + if (!respbuf || !respsize || (*respsize > MBOX_MAX_MSG_LEN)) { + dev_err(tdev->dev, "invalid respbuf or respsize\n"); + return -EINVAL; + } + + mutex_lock(&tdev->lock); + + tdev->rx_done = false; + tdev->pkt.size = tmecom_encode(tdev, reqbuf, reqsize); + /* + * Controller expects a 4 byte aligned buffer + */ + tdev->pkt.size = (tdev->pkt.size + 0x3) & ~0x3; + tdev->pkt.data = tdev->txbuf; + + pr_debug("tmecom encoded request size = %u\n", tdev->pkt.size); + print_hex_dump_bytes("tmecom sending bytes : ", + DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size); + + if (mbox_send_message(tdev->chan, &tdev->pkt) < 0) { + dev_err(tdev->dev, "failed to send qmp message\n"); + ret = -EAGAIN; + goto err_exit; + } + + time_left = wait_event_interruptible_timeout(tdev->waitq, + tmecom_check_rx_done(tdev), tdev->cl.tx_tout); + + if (!time_left) { + dev_err(tdev->dev, "request timed out\n"); + ret = -ETIMEDOUT; + goto err_exit; + } + + dev_info(tdev->dev, "response received\n"); + + pr_debug("tmecom received size = %u\n", tdev->pkt.size); + print_hex_dump_bytes("tmecom received bytes : ", + DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size); + + *respsize = tmecom_decode(tdev, respbuf); + + tdev->rx_done = false; + ret = 0; + +err_exit: + mutex_unlock(&tdev->lock); + return ret; +} +EXPORT_SYMBOL(tmecom_process_request); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static ssize_t tmecom_debugfs_write(struct file *file, + const char __user *userstr, size_t len, loff_t *pos) +{ + int ret = 0; + size_t rxlen = 0; + struct tme_ext_err_info *err_info = (struct tme_ext_err_info *)dpkt; + + + if (!len || (len > MBOX_MAX_MSG_LEN)) { + pr_err("invalid message length\n"); + return -EINVAL; + } + + memset(dpkt, 0, sizeof(*dpkt)); + ret = copy_from_user(dpkt, userstr, len); + if (ret) { + pr_err("%s copy from user failed, ret=%d\n", __func__, ret); + return len; + } + + tmecom_process_request(dpkt, len, dpkt, &rxlen); + + 
print_hex_dump_bytes("tmecom decoded bytes : ", + DUMP_PREFIX_ADDRESS, dpkt, rxlen); + + pr_debug("calling TME_HWKM_CMD_BROADCAST_TP_KEY api\n"); + ret = tme_hwkm_master_broadcast_transportkey(err_info); + + if (ret == 0) + pr_debug("%s successful\n", __func__); + + return len; +} + +static const struct file_operations tmecom_debugfs_ops = { + .open = simple_open, + .write = tmecom_debugfs_write, +}; +#endif /* CONFIG_DEBUG_FS */ + +static void tmecom_receive_message(struct mbox_client *client, void *message) +{ + struct tmecom *tdev = dev_get_drvdata(client->dev); + struct qmp_pkt *pkt = NULL; + + pr_debug("%s entered\n", __func__); + + if (!message) { + dev_err(tdev->dev, "spurious message received\n"); + goto tmecom_receive_end; + } + + if (tdev->rx_done) { + dev_err(tdev->dev, "tmecom response pending\n"); + goto tmecom_receive_end; + } + pkt = (struct qmp_pkt *)message; + tdev->pkt.size = pkt->size; + tdev->pkt.data = pkt->data; + tdev->rx_done = true; +tmecom_receive_end: + wake_up_interruptible(&tdev->waitq); +} + +static int tmecom_probe(struct platform_device *pdev) +{ + struct tmecom *tdev; + const char *label; + char name[32]; + + tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); + if (!tdev) + return -ENOMEM; + + tdev->cl.dev = &pdev->dev; + tdev->cl.tx_block = true; + tdev->cl.tx_tout = 500; + tdev->cl.knows_txdone = false; + tdev->cl.rx_callback = tmecom_receive_message; + + label = of_get_property(pdev->dev.of_node, "mbox-names", NULL); + if (!label) + return -EINVAL; + snprintf(name, 32, "%s_send_message", label); + + tdev->chan = mbox_request_channel(&tdev->cl, 0); + if (IS_ERR(tdev->chan)) { + dev_err(&pdev->dev, "failed to get mbox channel\n"); + return PTR_ERR(tdev->chan); + } + + mutex_init(&tdev->lock); + + if (tdev->chan) { + tdev->txbuf = + devm_kzalloc(&pdev->dev, MBOX_MAX_MSG_LEN, GFP_KERNEL); + if (!tdev->txbuf) { + dev_err(&pdev->dev, "message buffer alloc faile\n"); + return -ENOMEM; + } + } + + init_waitqueue_head(&tdev->waitq); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + debugfs_file = debugfs_create_file(name, 0220, NULL, tdev, + &tmecom_debugfs_ops); + if (!debugfs_file) + goto err; +#endif /* CONFIG_DEBUG_FS */ + + tdev->rx_done = false; + tdev->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, tdev); + + tmedev = tdev; + + dev_info(&pdev->dev, "tmecom probe success\n"); + return 0; +err: + mbox_free_channel(tdev->chan); + return -ENOMEM; +} + +static int tmecom_remove(struct platform_device *pdev) +{ + struct tmecom *tdev = platform_get_drvdata(pdev); + +#if IS_ENABLED(CONFIG_DEBUG_FS) + debugfs_remove(debugfs_file); +#endif /* CONFIG_DEBUG_FS */ + + if (tdev->chan) + mbox_free_channel(tdev->chan); + + dev_info(&pdev->dev, "tmecom remove success\n"); + return 0; +} + +static const struct of_device_id tmecom_match_tbl[] = { + {.compatible = "qcom,tmecom-qmp-client"}, + {}, +}; + +static struct platform_driver tmecom_driver = { + .probe = tmecom_probe, + .remove = tmecom_remove, + .driver = { + .name = "tmecom-qmp-client", + .suppress_bind_attrs = true, + .of_match_table = tmecom_match_tbl, + }, +}; +module_platform_driver(tmecom_driver); + +MODULE_DESCRIPTION("MSM TMECom QTI mailbox protocol client"); +MODULE_LICENSE("GPL v2"); diff --git a/tmecom/tmecom.h b/tmecom/tmecom.h new file mode 100644 index 0000000000..54dfb4cd0d --- /dev/null +++ b/tmecom/tmecom.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ +#ifndef _TMECOM_H_ +#define _TMECOM_H_ + +#define MBOX_MAX_MSG_LEN 1024 + +int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf, + size_t *respsize); +#endif /*_TMECOM_H_ */ From f5ec206c2611eca243b0f7fa40d04719d0fa2b20 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 31 Jan 2022 11:50:40 -0800 Subject: [PATCH 008/202] qcedev: do not report sps errors for presil sps driver is not enabled/tested in pre-sil, so do not error out during probe for sps failures. Change-Id: I0b3bb5684ea63b72cbd3735f8c92e62c3fb3d20c --- crypto-qti/qce50.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 6d8df8489e..427e8b6e3f 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -6055,9 +6055,9 @@ void *qce_open(struct platform_device *pdev, int *rc) qce_init_ce_cfg_val(pce_dev); *rc = qce_sps_init(pce_dev); - if (*rc) - goto err; - qce_setup_ce_sps_data(pce_dev); + if (*rc == 0) + qce_setup_ce_sps_data(pce_dev); + *rc = 0; qce_disable_clk(pce_dev); setup_dummy_req(pce_dev); atomic_set(&pce_dev->no_of_queued_req, 0); From 4724978ce5fe2563af8049172fb0405b7582148d Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 17 Feb 2022 14:05:07 -0800 Subject: [PATCH 009/202] Revert "securemsm-kernel: Enable tmecom module compilation" This reverts commit 72ec3a4a485438c824a8bf53ac166a2bfaadb8ac. --- Android.mk | 10 - Kbuild | 3 - config/ssg_smcinvoke.conf | 3 +- linux/tme_hwkm_master.h | 120 -------- linux/tme_hwkm_master_defs.h | 462 ------------------------------ securemsm_kernel_product_board.mk | 1 - securemsm_kernel_vendor_board.mk | 1 - tmecom/tme_hwkm_master.c | 404 -------------------------- tmecom/tme_hwkm_master_intf.h | 132 --------- tmecom/tmecom.c | 318 -------------------- tmecom/tmecom.h | 12 - 11 files changed, 1 insertion(+), 1465 deletions(-) delete mode 100644 linux/tme_hwkm_master.h delete mode 100644 linux/tme_hwkm_master_defs.h delete mode 100644 tmecom/tme_hwkm_master.c delete mode 100644 tmecom/tme_hwkm_master_intf.h delete mode 100644 tmecom/tmecom.c delete mode 100644 tmecom/tmecom.h diff --git a/Android.mk b/Android.mk index 4d9200d781..cb8f0f94ea 100644 --- a/Android.mk +++ b/Android.mk @@ -69,13 +69,3 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################# -################################################# -include $(CLEAR_VARS) -LOCAL_SRC_FILES := $(SSG_SRC_FILES) -LOCAL_MODULE := tmecom-intf_dlkm.ko -LOCAL_MODULE_KBUILD_NAME := tmecom-intf_dlkm.ko -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE_DEBUG_ENABLE := true -LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) -include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# diff --git a/Kbuild b/Kbuild index f51a3ec284..f1b98ce19f 100644 --- a/Kbuild +++ b/Kbuild @@ -22,6 +22,3 @@ qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o - -obj-$(CONFIG_MSM_TMECOM_QMP) := tmecom-intf_dlkm.o -tmecom-intf_dlkm-objs := tmecom/tmecom.o tmecom/tme_hwkm_master.o diff --git a/config/ssg_smcinvoke.conf b/config/ssg_smcinvoke.conf index 4de3fd4c74..f64b0435db 100644 --- a/config/ssg_smcinvoke.conf +++ b/config/ssg_smcinvoke.conf @@ -5,5 +5,4 @@ export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m export CONFIG_CRYPTO_DEV_QCRYPTO=m export CONFIG_SCSI_UFS_CRYPTO=m export CONFIG_SCSI_UFS_CRYPTO_QTI=m -export 
CONFIG_HDCP_QSEECOM=m -export CONFIG_MSM_TMECOM_QMP=m +export CONFIG_HDCP_QSEECOM=m \ No newline at end of file diff --git a/linux/tme_hwkm_master.h b/linux/tme_hwkm_master.h deleted file mode 100644 index 90503b44fa..0000000000 --- a/linux/tme_hwkm_master.h +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. - */ -#ifndef _TME_HWKM_MASTER_H_ -#define _TME_HWKM_MASTER_H_ - -#include - -/** - * API functions - */ - -/** - * Clear a Key Table entry. - * - * @param [in] key_id The ID of the key to clear. - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_clearkey(uint32_t key_id, - struct tme_ext_err_info *err_info); - -/** - * Generate a random key with an associated policy. - * - * @param [in] key_id The ID of the key to be generated. - * @param [in] policy The policy specifying the key to be generated. - * @param [in] cred_slot Credential slot to which this key will be bound. - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_generatekey(uint32_t key_id, - struct tme_key_policy *policy, - uint32_t cred_slot, - struct tme_ext_err_info *err_info); - -/** - * Derive a KEY using either HKDF or NIST algorithms. - * - * @param [in] key_id The ID of the key to be derived. - * @param [in] kdf_info Specifies how the key is to be derived - * and the properties of the derived key. - * @param [in] cred_slot Credential slot to which this key will be bound. - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_derivekey(uint32_t key_id, - struct tme_kdf_spec *kdf_info, - uint32_t cred_slot, - struct tme_ext_err_info *err_info); - -/** - * Wrap a key so that it can be safely moved outside the TME. - * - * @param [in] kwkey_id Denotes a key, already present in the - * Key Table, to be used to secure the target key. - * @param [in] targetkey_id Denotes the key to be wrapped. - * @param [in] cred_slot Credential slot to which this key is bound. - * @param [out] wrapped Buffer for wrapped key output from response - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_wrapkey(uint32_t key_id, - uint32_t targetkey_id, - uint32_t cred_slot, - struct tme_wrapped_key *wrapped, - struct tme_ext_err_info *err_info); - -/** - * Unwrap a key from outside the TME and store in the Key Table. - * - * @param [in] key_id The ID of the key to be unwrapped. - * @param [in] kwkey_id Denotes a key, already present in the - * Key Table, to be used to unwrap the key. - * @param [in] cred_slot Credential slot to which this key will be bound. - * @param [in] wrapped The key to be unwrapped. - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id, - uint32_t kwkey_id, - uint32_t cred_slot, - struct tme_wrapped_key *wrapped, - struct tme_ext_err_info *err_info); - -/** - * Import a plaintext key from outside the TME and store in the Key Table. - * - * @param [in] key_id The ID of the key to be imported. - * @param [in] policy The Key Policy to be associated with the key. - * @param [in] keyMaterial The plaintext key material. - * @param [in] cred_slot Credential slot to which this key will be bound. 
- * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_importkey(uint32_t key_id, - struct tme_key_policy *policy, - struct tme_plaintext_key *key_material, - uint32_t cred_slot, - struct tme_ext_err_info *err_info); - -/** - * Broadcast Transport Key to HWKM slaves. - * - * @param [out] err_info Extended error info - * - * @return 0 if successful, error code otherwise. - */ -uint32_t tme_hwkm_master_broadcast_transportkey( - struct tme_ext_err_info *err_info); - -#endif /* _TME_HWKM_MASTER_H_ */ - diff --git a/linux/tme_hwkm_master_defs.h b/linux/tme_hwkm_master_defs.h deleted file mode 100644 index d6b1a8f5ac..0000000000 --- a/linux/tme_hwkm_master_defs.h +++ /dev/null @@ -1,462 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. - */ -#ifndef _TME_HWKM_MASTER_DEFS_H_ -#define _TME_HWKM_MASTER_DEFS_H_ - -#include - -#define UINT32_C(x) (x ## U) - -/** - * Key ID - */ -/* L1 Key IDs that are Key Table slot numbers */ -/**< CUS, 512 bits, in fuses */ -#define TME_KID_CHIP_UNIQUE_SEED 8 -/**< CRBK, 512 bits, in fuses */ -#define TME_KID_CHIP_RAND_BASE 9 -/**< L1 Key derived from L0 slot numbers 0-3 or 4-7 */ -#define TME_KID_CHIP_FAM_L1 10 - -/* Transport Key ID */ -#define TME_KID_TP 11/**< 528 bits, retained */ - -/** - * KeyPolicy - */ -/** Key Policy: 64-bit integer with bit encoded values */ -struct tme_key_policy { - uint32_t low; - uint32_t high; -} __packed; - -#define TME_KPHALFBITS 32 - -#define TME_KPCOMBINE(lo32, hi32) (((uint64_t)(lo32)) | \ - (((uint64_t)(hi32)) << TME_KPHALFBITS)) - -/** - * Fields in Key Policy low word - */ - -/** Key Type: Fundamental crypto algorithm groups */ -/**< Position of Key Type bits */ -#define TME_KT_Shift 0 -/**< Mask for Key Type bits */ -#define TME_KT_Mask (UINT32_C(0x07) << TME_KT_Shift) -/**< Symmetric algorithms */ -#define TME_KT_Symmetric (UINT32_C(0x00) << TME_KT_Shift) -/**< Asymmetric algorithms: ECC */ -#define TME_KT_Asymmetric_ECC (UINT32_C(0x01) << TME_KT_Shift) -/**< Asymmetric algorithms: RSA */ -#define TME_KT_Asymmetric_RSA (UINT32_C(0x05) << TME_KT_Shift) - -/** Key Length */ -/**< Position of Key Length bits */ -#define TME_KL_Shift 3 -/**< Mask for Key Length bits */ -#define TME_KL_Mask (UINT32_C(0x0F) << TME_KL_Shift) -/**< 64 bits - AES/2TDES */ -#define TME_KL_64 (UINT32_C(0x00) << TME_KL_Shift) -/**< 128 bits - AES/2TDES */ -#define TME_KL_128 (UINT32_C(0x01) << TME_KL_Shift) -/**< 192 bits - AES/3TDES */ -#define TME_KL_192 (UINT32_C(0x02) << TME_KL_Shift) -/**< 224 bits - ECDSA */ -#define TME_KL_224 (UINT32_C(0x03) << TME_KL_Shift) -/**< 256 bits - ECDSA/AES */ -#define TME_KL_256 (UINT32_C(0x04) << TME_KL_Shift) -/**< 384 bits - ECDSA */ -#define TME_KL_384 (UINT32_C(0x05) << TME_KL_Shift) -/**< 448 bits - ECDSA */ -#define TME_KL_448 (UINT32_C(0x06) << TME_KL_Shift) -/**< 512 bits - ECDSA/HMAC/KDF/AES-SIV/AES-XTS */ -#define TME_KL_512 (UINT32_C(0x07) << TME_KL_Shift) -/**< 521 bits - ECDSA/HMAC/KDF */ -#define TME_KL_521 (UINT32_C(0x08) << TME_KL_Shift) -/**< 2048 bits - RSA */ -#define TME_KL_2048 (UINT32_C(0x09) << TME_KL_Shift) -/**< 3072 bits - RSA */ -#define TME_KL_3072 (UINT32_C(0x0A) << TME_KL_Shift) -/**< 4096 bits - RSA */ -#define TME_KL_4096 (UINT32_C(0x0B) << TME_KL_Shift) - -/** - * Key Profile: Only applicable at present - * if Key Type is #TME_KT_Symmetric - */ -/**< Position of Key Profile bits */ -#define TME_KP_Shift 7 -/**< Mask for Key Class 
bits */ -#define TME_KP_Mask (UINT32_C(0x07) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KP_Generic (UINT32_C(0x00) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric (aka KDK) */ -#define TME_KP_KeyDerivation (UINT32_C(0x01) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric (aka KWK) */ -#define TME_KP_KWK_STORAGE (UINT32_C(0x02) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric (aka KSK) */ -#define TME_KP_KWK_SESSION (UINT32_C(0x03) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric (aka TPK) */ -#define TME_KP_KWK_TRANSPORT (UINT32_C(0x04) << TME_KP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KP_KWK_XPORT (UINT32_C(0x05) << TME_KP_Shift) -/**< If Key Type is not #TME_KT_Symmetric */ -#define TME_KP_Unused (UINT32_C(0x00) << TME_KP_Shift) - -/** Key Operation: Crypto operations permitted for a key */ -/**< Position of Key Operation bits */ -#define TME_KOP_Shift 10 -/**< Mask for Key Operation bits */ -#define TME_KOP_Mask (UINT32_C(0x0F) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_Encryption (UINT32_C(0x01) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_Decryption (UINT32_C(0x02) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_MAC (UINT32_C(0x04) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_NISTDerive (UINT32_C(0x04) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_HKDFExtract (UINT32_C(0x08) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KOP_HKDFExpand (UINT32_C(0x09) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_ECC */ -#define TME_KOP_ECDSASign (UINT32_C(0x01) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_ECC */ -#define TME_KOP_ECDSAVerify (UINT32_C(0x02) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_ECC */ -#define TME_KOP_ECDHSharedSecret (UINT32_C(0x04) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_RSA */ -#define TME_KOP_RSAASign (UINT32_C(0x01) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_RSA */ -#define TME_KOP_RSAAVerify (UINT32_C(0x02) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_RSA */ -#define TME_KOP_RSAEnc (UINT32_C(0x04) << TME_KOP_Shift) -/**< If Key Type is #TME_KT_Asymmetric_RSA */ -#define TME_KOP_RSADec (UINT32_C(0x08) << TME_KOP_Shift) - -/** Key Algorithm */ -/**< Position of Key Algorithm bits */ -#define TME_KAL_Shift 14 -/**< Mask for Key Algorithm bits */ -#define TME_KAL_Mask (UINT32_C(0x3F) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Symmetric */ -#define TME_KAL_AES128_ECB (UINT32_C(0x00) << TME_KAL_Shift) -#define TME_KAL_AES256_ECB (UINT32_C(0x01) << TME_KAL_Shift) -#define TME_KAL_DES_ECB (UINT32_C(0x02) << TME_KAL_Shift) -#define TME_KAL_TDES_ECB (UINT32_C(0x03) << TME_KAL_Shift) -#define TME_KAL_AES128_CBC (UINT32_C(0x04) << TME_KAL_Shift) -#define TME_KAL_AES256_CBC (UINT32_C(0x05) << TME_KAL_Shift) -#define TME_KAL_DES_CBC (UINT32_C(0x06) << TME_KAL_Shift) -#define TME_KAL_TDES_CBC (UINT32_C(0x07) << TME_KAL_Shift) -#define TME_KAL_AES128_CCM_TC (UINT32_C(0x08) << TME_KAL_Shift) -#define TME_KAL_AES128_CCM_NTC (UINT32_C(0x09) << TME_KAL_Shift) -#define TME_KAL_AES256_CCM_TC (UINT32_C(0x0A) << TME_KAL_Shift) -#define TME_KAL_AES256_CCM_NTC (UINT32_C(0x0B) << TME_KAL_Shift) -#define TME_KAL_AES256_SIV (UINT32_C(0x0C) << TME_KAL_Shift) -#define TME_KAL_AES128_CTR (UINT32_C(0x0D) << TME_KAL_Shift) -#define TME_KAL_AES256_CTR 
(UINT32_C(0x0E) << TME_KAL_Shift) -#define TME_KAL_AES128_XTS (UINT32_C(0x0F) << TME_KAL_Shift) -#define TME_KAL_AES256_XTS (UINT32_C(0x10) << TME_KAL_Shift) -#define TME_KAL_SHA1_HMAC (UINT32_C(0x11) << TME_KAL_Shift) -#define TME_KAL_SHA256_HMAC (UINT32_C(0x12) << TME_KAL_Shift) -#define TME_KAL_AES128_CMAC (UINT32_C(0x13) << TME_KAL_Shift) -#define TME_KAL_AES256_CMAC (UINT32_C(0x14) << TME_KAL_Shift) -#define TME_KAL_SHA384_HMAC (UINT32_C(0x15) << TME_KAL_Shift) -#define TME_KAL_SHA512_HMAC (UINT32_C(0x16) << TME_KAL_Shift) -#define TME_KAL_AES128_GCM (UINT32_C(0x17) << TME_KAL_Shift) -#define TME_KAL_AES256_GCM (UINT32_C(0x18) << TME_KAL_Shift) -#define TME_KAL_KASUMI (UINT32_C(0x19) << TME_KAL_Shift) -#define TME_KAL_SNOW3G (UINT32_C(0x1A) << TME_KAL_Shift) -#define TME_KAL_ZUC (UINT32_C(0x1B) << TME_KAL_Shift) -#define TME_KAL_PRINCE (UINT32_C(0x1C) << TME_KAL_Shift) -#define TME_KAL_SIPHASH (UINT32_C(0x1D) << TME_KAL_Shift) -#define TME_KAL_TDES_2KEY_CBC (UINT32_C(0x1E) << TME_KAL_Shift) -#define TME_KAL_TDES_2KEY_ECB (UINT32_C(0x1F) << TME_KAL_Shift) -#define TME_KAL_KDF_NIST (UINT32_C(0x20) << TME_KAL_Shift) -#define TME_KAL_KDF_HKDF (UINT32_C(0x21) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ -#define TME_KAL_ECC_ALGO_ECDSA (UINT32_C(0x00) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ -#define TME_KAL_ECC_ALGO_ECDH (UINT32_C(0x01) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ -#define TME_KAL_ECC_CURVE_NIST (UINT32_C(0x00) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */ -#define TME_KAL_ECC_CURVE_BPOOL (UINT32_C(0x08) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */ -#define TME_KAL_DSA (UINT32_C(0x00) << TME_KAL_Shift) -/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */ -#define TME_KAL_DH (UINT32_C(0x01) << TME_KAL_Shift) - -/** Key Security Level */ -/**< Position of Key Security Level bits */ -#define TME_KSL_Shift 20 -/**< Mask for Key Security Level bits */ -#define TME_KSL_Mask (UINT32_C(0x03) << TME_KSL_Shift) -/**< Software Key */ -#define TME_KSL_SWKey (UINT32_C(0x00) << TME_KSL_Shift) -/**< Hardware Managed Key */ -#define TME_KSL_HWManagedKey (UINT32_C(0x01) << TME_KSL_Shift) -/**< Hardware Key */ -#define TME_KSL_HWKey (UINT32_C(0x02) << TME_KSL_Shift) - -/** Key Destination */ -/**< Position of Key Destination bits */ -#define TME_KD_Shift 22 -/**< Mask for Key Destination bits */ -#define TME_KD_Mask (UINT32_C(0x0F) << TME_KD_Shift) -/**< Master */ -#define TME_KD_TME_HW (UINT32_C(0x01) << TME_KD_Shift) -/**< ICE Slave */ -#define TME_KD_ICE (UINT32_C(0x02) << TME_KD_Shift) -/**< GPCE Slave */ -#define TME_KD_GPCE (UINT32_C(0x04) << TME_KD_Shift) -/**< Modem CE Slave */ -#define TME_KD_MDM_CE (UINT32_C(0x08) << TME_KD_Shift) - -/** Key Owner */ -/**< Position of Key Owner bits */ -#define TME_KO_Shift 26 -/**< Mask for Key Owner bits */ -#define TME_KO_Mask (UINT32_C(0x0F) << TME_KO_Shift) -/**< TME Hardware */ -#define TME_KO_TME_HW (UINT32_C(0x00) << TME_KO_Shift) -/**< TME Firmware */ -#define TME_KO_TME_FW (UINT32_C(0x01) << TME_KO_Shift) -/**< TZ (= APPS-S) */ -#define TME_KO_TZ (UINT32_C(0x02) << TME_KO_Shift) -/**< HLOS / HYP (= APPS-NS) */ -#define TME_KO_HLOS_HYP (UINT32_C(0x03) << TME_KO_Shift) -/**< Modem */ -#define TME_KO_MDM (UINT32_C(0x04) << TME_KO_Shift) -/**< SPU */ -#define TME_KO_SPU (UINT32_C(0x0F) << TME_KO_Shift) - -/** Key Lineage */ -/**< Position of Key 
Lineage bits */ -#define TME_KLI_Shift 30 -/**< Mask for Key Lineage bits */ -#define TME_KLI_Mask (UINT32_C(0x03) << TME_KLI_Shift) -/**< Not applicable */ -#define TME_KLI_NA (UINT32_C(0x00) << TME_KLI_Shift) -/**< Not provisioned, chip unique */ -#define TME_KLI_NP_CU (UINT32_C(0x01) << TME_KLI_Shift) -/**< Provisioned, not chip unique */ -#define TME_KLI_P_NCU (UINT32_C(0x02) << TME_KLI_Shift) -/**< Provisioned, chip unique */ -#define TME_KLI_P_CU (UINT32_C(0x03) << TME_KLI_Shift) - -/** - * Fields in Key Policy high word * - */ - -/** Reserved Bits, Group 1 */ -/**< Position of Reserved bits */ -#define TME_KR1_Shift (32 - TME_KPHALFBITS) -/**< Mask for Reserved bits */ -#define TME_KR1_Mask (UINT32_C(0x01) << TME_KR1_Shift) - -/** Key Wrapping Constraints */ -/**< Position of Key Attribute bits */ -#define TME_KWC_Shift (33 - TME_KPHALFBITS) -/**< Mask for Key Attribute bits */ -#define TME_KWC_Mask (UINT32_C(0x0F) << TME_KWC_Shift) -/**< Key is wrappable with KWK_EXPORT */ -#define TME_KWC_Wrappable_KXP (UINT32_C(0x01) << TME_KWC_Shift) -/**< Key is wrappable with KWK_STORAGE */ -#define TME_KWC_Wrappable_KWK (UINT32_C(0x02) << TME_KWC_Shift) -/**< Key is wrappable with KWK_TRANSPORT */ -#define TME_KWC_Wrappable_KTP (UINT32_C(0x04) << TME_KWC_Shift) -/**< Key is wrappable with KWK_SESSION */ -#define TME_KWC_Wrappable_KSK (UINT32_C(0x08) << TME_KWC_Shift) - -/** Throttling */ -/**< Position of Throttling bits */ -#define TME_KTH_Shift (37 - TME_KPHALFBITS) -/**< Mask for Throttling bits */ -#define TME_KTH_Mask (UINT32_C(0x01) << TME_KTH_Shift) -/**< Throttling enabled */ -#define TME_KTH_Enabled (UINT32_C(0x01) << TME_KTH_Shift) - -/** Reserved Bits, Group 2 */ -/**< Position of Reserved bits */ -#define TME_KR2_Shift (38 - TME_KPHALFBITS) -/**< Mask for Reserved bits */ -#define TME_KR2_Mask (UINT32_C(0x3F) << TME_KR2_Shift) - -/** Key Policy Version */ -/**< Position of Key Policy Version bits */ -#define TME_KPV_Shift (44 - TME_KPHALFBITS) -/**< Mask for Key Policy Version bits */ -#define TME_KPV_Mask (UINT32_C(0x0F) << TME_KPV_Shift) -/**< Mask for Key Policy Version bits */ -#define TME_KPV_Version (UINT32_C(0x03) << TME_KPV_Shift) - -/** Key Authorised Users */ -/**< Position of Authorised User bits */ -#define TME_KAU_Shift (48 - TME_KPHALFBITS) -/**< Mask for Authorised User bits */ -#define TME_KAU_Mask (UINT32_C(0xFF) << TME_KAU_Shift) -/**< Key usable by TME Hardware */ -#define TME_KAU_TME_HW (UINT32_C(0x01) << TME_KAU_Shift) -/**< Key usable by TME Firmware */ -#define TME_KAU_TME_FW (UINT32_C(0x02) << TME_KAU_Shift) -/**< Key usable by TZ (= APPS_S) */ -#define TME_KAU_TZ (UINT32_C(0x04) << TME_KAU_Shift) -/**< Key usable by HLOS / HYP (= APPS_NS) */ -#define TME_KAU_HLOS_HYP (UINT32_C(0x08) << TME_KAU_Shift) -/**< Key usable by Modem */ -#define TME_KAU_MDM (UINT32_C(0x10) << TME_KAU_Shift) -/**< Key usable by SPU */ -#define TME_KAU_SPU (UINT32_C(0x20) << TME_KAU_Shift) -/**< Key usable by all EEs */ -#define TME_KAU_ALL TME_KAU_Mask - -/** - * Credentials for throttling - */ -#define TME_CRED_SLOT_ID_NONE 0 /**< No throttling */ -#define TME_CRED_SLOT_ID_1 1 /**< Credential slot 1 */ -#define TME_CRED_SLOT_ID_2 2 /**< Credential slot 2 */ - -/** - * KDFSpec and associated structures - */ -/** Maximum context size that can be sent to the TME, in bytes */ -#define TME_KDF_SW_CONTEXT_BYTES_MAX 128 -#define TME_KDF_SALT_LABEL_BYTES_MAX 64 - -/** - * Security info to be appended to a KDF context by the Sequencer - * - * These fields allow keys to be tied to specific 
devices, states, - * OEMs, subsystems, etc. - * Values are obtained by the Sequencer from hardware, such as - * fuses or internal registers. - */ -#define TME_KSC_SOCTestSignState 0x00000001 /**< (32 bits) */ -#define TME_KSC_SOCSecBootState 0x00000002 /**< (8 bits) */ -#define TME_KSC_SOCDebugState 0x00000004 /**< (8 bits) */ -#define TME_KSC_TMELifecycleState 0x00000008 /**< (8 bits) */ -#define TME_KSC_BootStageOTP 0x00000010 /**< (8 bits) */ -#define TME_KSC_SWContext 0x00000020 /**< (variable) */ -#define TME_KSC_ChildKeyPolicy 0x00000040 /**< (64 bits) */ -#define TME_KSC_MixingKey 0x00000080 /**< (key len) */ -#define TME_KSC_ChipUniqueID 0x00000100 /**< (64 bits) */ -#define TME_KSC_ChipDeviceNumber 0x00000200 /**< (32 bits) */ -#define TME_KSC_TMEPatchVer 0x00000400 /**< (512 bits) */ -#define TME_KSC_SOCPatchVer 0x00000800 /**< (512 bits) */ -#define TME_KSC_OEMID 0x00001000 /**< (16 bits) */ -#define TME_KSC_OEMProductID 0x00002000 /**< (16 bits) */ -#define TME_KSC_TMEImgSecVer 0x00004000 /**< (512 bits) */ -#define TME_KSC_SOCInitImgSecVer 0x00008000 /**< (512 bits) */ -#define TME_KSC_OEMMRCHash 0x00010000 /**< (512 bits) */ -#define TME_KSC_OEMProductSeed 0x00020000 /**< (128 bits) */ -#define TME_KSC_SeqPatchVer 0x00040000 /**< (512 bits) */ -#define TME_KSC_HWMeasurement1 0x00080000 /**< (512 bits) */ -#define TME_KSC_HWMeasurement2 0x00100000 /**< (512 bits) */ -#define TME_KSC_Reserved 0xFFE00000 /**< RFU */ - -/** KDF Specification: encompasses both HKDF and NIST KDF algorithms */ -struct tme_kdf_spec { - /* Info common to HKDF and NIST algorithms */ - /**< @c TME_KAL_KDF_HKDF or @c TME_KAL_KDF_NIST */ - uint32_t kdfalgo; - /**< IKM for HKDF; IKS for NIST */ - uint32_t inputkey; - /**< If @c TME_KSC_MixingKey set in Security Context */ - uint32_t mixkey; - /**< If deriving a L3 key */ - uint32_t l2key; - /**< Derived key policy */ - struct tme_key_policy policy; - /**< Software provided context */ - uint8_t swcontext[TME_KDF_SW_CONTEXT_BYTES_MAX]; - /**< Length of @c swContext in bytes */ - uint32_t swcontextLength; - /**< Info to be appended to @c swContext */ - uint32_t security_context; - /**< Salt for HKDF; Label for NIST */ - uint8_t salt_label[TME_KDF_SALT_LABEL_BYTES_MAX]; - /**< Length of @c saltLabel in bytes */ - uint32_t salt_labelLength; - /* Additional info specific to HKDF: kdfAlgo == @c KAL_KDF_HKDF */ - /**< PRF Digest algorithm: @c KAL_SHA256_HMAC or @c KAL_SHA512_HMAC */ - uint32_t prf_digest_algo; -} __packed; - -/** - * WrappedKey and associated structures - */ -/* Maximum wrapped key context size, in bytes */ -/**< Cipher Text 68B, MAC 16B, KeyPolicy 8B, Nonce 8B */ -#define TME_WK_CONTEXT_BYTES_MAX 100 -struct tme_wrapped_key { - /**< Wrapped key context */ - uint8_t key[TME_WK_CONTEXT_BYTES_MAX]; - /**< Length of @c key in bytes*/ - uint32_t length; -} __packed; - -/** - * Plain text Key and associated structures - */ -/* Maximum plain text key size, in bytes */ -#define TME_PT_KEY_BYTES_MAX 68 - -/** - * Key format for intrinsically word aligned key - * lengths like 128/256/384/512... bits. - * - * Example: 256-bit key integer representation, - * Key = 0xK31 K30 K29.......K0 - * Byte array, key[] = {0xK31, 0xK30, 0xK29, ...., 0xK0} - * - * - * Key format for non-word aligned key lengths like 521 bits. - * The key length is rounded off to next word ie, 544 bits. 
- * - * Example: 521-bit key, Key = 0xK65 K64 K63.......K2 K1 K0 - * [bits 1-7 of K0 is expected to be zeros] - * 544 bit integer representation, Key = 0xK65 K64 K63.......K2 K1 K0 00 00 - * Byte array, key[] = {0xK65, 0xK64, 0xK63, ...., 0xK2, 0xK1, 0xK0, 0x00, 0x00} - * - */ -struct tme_plaintext_key { - /**< Plain text key */ - uint8_t key[TME_PT_KEY_BYTES_MAX]; - /**< Length of @c key in bytes */ - uint32_t length; -} __packed; - -/** - * Extended Error Information structure - */ -struct tme_ext_err_info { - /* TME FW */ - /**< TME FW Response status. */ - uint32_t tme_err_status; - - /* SEQ FW */ - /**< Contents of CSR_CMD_ERROR_STATUS */ - uint32_t seq_err_status; - - /* SEQ HW Key Policy */ - /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */ - uint32_t seq_kp_err_status0; - /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */ - uint32_t seq_kp_err_status1; - - /** - * Debug information: log/print this information - * if any of the above fields is non-zero - */ - /**< Contents of CSR_CMD_RESPONSE_STATUS */ - uint32_t seq_rsp_status; -} __packed; - -#endif /* _TME_HWKM_MASTER_DEFS_H_ */ - diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index e43f1db9a0..556281513c 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -5,7 +5,6 @@ PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ - $(KERNEL_MODULES_OUT)/tmecom-intf_dlkm.ko \ diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index 8e80187b18..841a8885b6 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -4,5 +4,4 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ - $(KERNEL_MODULES_OUT)/tmecom-intf_dlkm.ko \ diff --git a/tmecom/tme_hwkm_master.c b/tmecom/tme_hwkm_master.c deleted file mode 100644 index 28aa03744b..0000000000 --- a/tmecom/tme_hwkm_master.c +++ /dev/null @@ -1,404 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
- */ - -#include -#include -#include -#include - -#include "tme_hwkm_master_intf.h" -#include "tmecom.h" - -#define TME_MSG_CBOR_TAG_HWKM (303) - -#define TME_CLEAR_KEY_CBOR_TAG 0x482F01D9 /* _be32 0xD9012F48 */ -#define TME_DERIVE_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ -#define TME_GENERATE_KEY_CBOR_TAG 0x542F01D9 /* _be32 0xD9012F54 */ -#define TME_IMPORT_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ -#define TME_WRAP_KEY_CBOR_TAG 0x502F01D9 /* _be32 0xD9012F50 */ -#define TME_UNWRAP_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */ -#define TME_BORADCAST_KEY_CBOR_TAG 0x442F01D9 /* _be32 0xD9012F44 */ - -/* - * Static alloc for wrapped key - * Protected by tmecom dev mutex - */ -static struct wrap_key_resp gwrpk_response = {0}; - -static inline uint32_t update_ext_err( - struct tme_ext_err_info *err_info, - struct tme_response_sts *result) -{ - bool is_failure = false; - - err_info->tme_err_status = result->tme_err_status; - err_info->seq_err_status = result->seq_err_status; - err_info->seq_kp_err_status0 = result->seq_kp_err_status0; - err_info->seq_kp_err_status1 = result->seq_kp_err_status1; - err_info->seq_rsp_status = result->seq_rsp_status; - - is_failure = err_info->tme_err_status || - err_info->seq_err_status || - err_info->seq_kp_err_status0 || - err_info->seq_kp_err_status1; - - print_hex_dump_bytes("err_info decoded bytes : ", - DUMP_PREFIX_ADDRESS, (void *)err_info, - sizeof(*err_info)); - - return is_failure ? 1 : 0; -} - -uint32_t tme_hwkm_master_clearkey(uint32_t key_id, - struct tme_ext_err_info *err_info) -{ - struct clear_key_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!err_info) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cmd.code = TME_HWKM_CMD_CLEAR_KEY; - request->key_id = key_id; - request->cbor_header = TME_CLEAR_KEY_CBOR_TAG; - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM clear key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_clearkey); - -uint32_t tme_hwkm_master_generatekey(uint32_t key_id, - struct tme_key_policy *policy, - uint32_t cred_slot, - struct tme_ext_err_info *err_info) -{ - struct gen_key_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!err_info || !policy) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cmd.code = TME_HWKM_CMD_GENERATE_KEY; - request->key_id = key_id; - request->cred_slot = cred_slot; - request->cbor_header = TME_GENERATE_KEY_CBOR_TAG; - memcpy(&request->key_policy, policy, sizeof(*policy)); - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM generate key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - 
pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_generatekey); - -uint32_t tme_hwkm_master_derivekey(uint32_t key_id, - struct tme_kdf_spec *kdf_info, - uint32_t cred_slot, - struct tme_ext_err_info *err_info) -{ - struct derive_key_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!kdf_info || !err_info) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cmd.code = TME_HWKM_CMD_DERIVE_KEY; - request->key_id = key_id; - request->cred_slot = cred_slot; - request->cbor_header = TME_DERIVE_KEY_CBOR_TAG; - memcpy(&request->kdf_info, kdf_info, sizeof(*kdf_info)); - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM derive key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_derivekey); - -uint32_t tme_hwkm_master_wrapkey(uint32_t key_id, - uint32_t targetkey_id, - uint32_t cred_slot, - struct tme_wrapped_key *wrapped, - struct tme_ext_err_info *err_info) -{ - struct wrap_key_req *request = NULL; - struct wrap_key_resp *wrpk_response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*wrpk_response); - - if (!wrapped || !err_info) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - wrpk_response = &gwrpk_response; - - if (!request) - return -ENOMEM; - - request->cmd.code = TME_HWKM_CMD_WRAP_KEY; - request->key_id = key_id; - request->target_key_id = targetkey_id; - request->cbor_header = TME_WRAP_KEY_CBOR_TAG; - - ret = tmecom_process_request(request, sizeof(*request), wrpk_response, - &response_len); - - if (ret != 0) { - pr_err("HWKM wrap key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*wrpk_response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(wrpk_response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, &wrpk_response->status); - - if (!ret) - memcpy(wrapped, &wrpk_response->wrapped_key, sizeof(*wrapped)); - -err_exit: - kfree(request); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_wrapkey); - -uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id, - uint32_t kwkey_id, - uint32_t cred_slot, - struct tme_wrapped_key *wrapped, - struct tme_ext_err_info *err_info) -{ - struct unwrap_key_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!wrapped || !err_info) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cmd.code = TME_HWKM_CMD_UNWRAP_KEY; - request->key_id = key_id; - request->kw_key_id = kwkey_id; - request->cbor_header = TME_UNWRAP_KEY_CBOR_TAG; - 
memcpy(&request->wrapped, wrapped, sizeof(*wrapped)); - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM unwrap key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_unwrapkey); - -uint32_t tme_hwkm_master_importkey(uint32_t key_id, - struct tme_key_policy *policy, - struct tme_plaintext_key *key_material, - uint32_t cred_slot, - struct tme_ext_err_info *err_info) -{ - struct import_key_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!key_material || !err_info || !policy) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cmd.code = TME_HWKM_CMD_IMPORT_KEY; - request->key_id = key_id; - request->cred_slot = cred_slot; - request->cbor_header = TME_IMPORT_KEY_CBOR_TAG; - memcpy(&request->key_policy, policy, sizeof(*policy)); - memcpy(&request->key_material, key_material, sizeof(*key_material)); - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM import key request failed for %d\n", key_id); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_importkey); - -uint32_t tme_hwkm_master_broadcast_transportkey( - struct tme_ext_err_info *err_info) -{ - struct broadcast_tpkey_req *request = NULL; - struct tme_response_sts *response = NULL; - uint32_t ret = 0; - size_t response_len = sizeof(*response); - - if (!err_info) - return -EINVAL; - - request = kzalloc(sizeof(*request), GFP_KERNEL); - response = kzalloc(response_len, GFP_KERNEL); - - if (!request || !response) { - ret = -ENOMEM; - goto err_exit; - } - - request->cbor_header = TME_BORADCAST_KEY_CBOR_TAG; - request->cmd.code = TME_HWKM_CMD_BROADCAST_TP_KEY; - - ret = tmecom_process_request(request, sizeof(*request), response, - &response_len); - - if (ret != 0) { - pr_err("HWKM broadcast TP key request failed\n"); - goto err_exit; - } - - if (response_len != sizeof(*response)) { - pr_err("HWKM response failed with invalid length: %u, %u\n", - response_len, sizeof(response)); - ret = -EBADMSG; - goto err_exit; - } - - ret = update_ext_err(err_info, response); - -err_exit: - kfree(request); - kfree(response); - return ret; -} -EXPORT_SYMBOL(tme_hwkm_master_broadcast_transportkey); - diff --git a/tmecom/tme_hwkm_master_intf.h b/tmecom/tme_hwkm_master_intf.h deleted file mode 100644 index f847f68d48..0000000000 --- a/tmecom/tme_hwkm_master_intf.h +++ /dev/null @@ -1,132 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
- */ -#ifndef _TME_HWKM_MASTER_INTERFACE_H_ -#define _TME_HWKM_MASTER_INTERFACE_H_ - -#include - -/** - * HWKM Master command IDs - */ -enum tme_hwkm_cmd { - TME_HWKM_CMD_CLEAR_KEY = 0, /**< Clear Key */ - TME_HWKM_CMD_GENERATE_KEY = 1, /**< Generate Key */ - TME_HWKM_CMD_DERIVE_KEY = 2, /**< Derive Key, NIST or HKDF */ - TME_HWKM_CMD_WRAP_KEY = 3, /**< Wrap Key */ - TME_HWKM_CMD_UNWRAP_KEY = 4, /**< Unwrap Key */ - TME_HWKM_CMD_IMPORT_KEY = 5, /**< Import Key */ - TME_HWKM_CMD_BROADCAST_TP_KEY = 6, /**< Broadcast Transport Key */ - TMW_HWKM_CMD_INVALID = 7, /**< Invalid cmd */ -}; - -/** - * Opcode and response structures - */ - -/* Values as per TME_HWKM_CMD_* */ -struct tme_hwkm_master_cmd { - uint32_t code; -} __packed; - - -struct tme_response_sts { - /* TME FW */ - uint32_t tme_err_status; /**< TME FW Response status. */ - - /* SEQ FW */ - uint32_t seq_err_status; /**< Contents of CSR_CMD_ERROR_STATUS */ - - /* SEQ HW Key Policy */ - uint32_t seq_kp_err_status0; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */ - uint32_t seq_kp_err_status1; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */ - - /* Debug information: log/print this information if any of the above fields is non-zero */ - uint32_t seq_rsp_status; /**< Contents of CSR_CMD_RESPONSE_STATUS */ - -} __packed; - -/** - * Clear Key ID structures - */ -struct clear_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_CLEAR_KEY */ - uint32_t key_id; /**< The ID of the key to clear.*/ -} __packed; - -/** - * Generate Key ID structures - */ -struct gen_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_GENERATE_KEY */ - uint32_t key_id; /**< The ID of the key to be generated. */ - struct tme_key_policy key_policy;/**< The policy specifying the key to be generated. */ - uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ -} __packed; - -/** - * Derive Key ID structures - */ -struct derive_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_DERIVE_KEY */ - uint32_t key_id; /**< The ID of the key to be derived. */ - struct tme_kdf_spec kdf_info; /**< Specifies how the key is to be derived. */ - uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ -} __packed; - -/** - * Wrap Key ID structures - */ -struct wrap_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_WRAP_KEY */ - uint32_t key_id; /**< The ID of the key to secure the target key. */ - uint32_t target_key_id; /**< Denotes the key to be wrapped. */ - uint32_t cred_slot; /**< Credential slot to which this key is bound. */ -} __packed; - - -struct wrap_key_resp { - struct tme_response_sts status; /**< Response status. */ - struct tme_wrapped_key wrapped_key; /**< The wrapped key. */ -} __packed; - -/** - * Unwrap Key ID structures - */ -struct unwrap_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_UNWRAP_KEY */ - uint32_t key_id; /**< The ID of the key to be unwrapped. */ - uint32_t kw_key_id; /**< The ID of the key to be used to unwrap the key. */ - struct tme_wrapped_key wrapped; /**< The key to be unwrapped. */ - uint32_t cred_slot; /**< Credential slot to which this key will be bound. 
*/ -} __packed; - -/** - * Import Key ID structures - */ -struct import_key_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_IMPORT_KEY */ - uint32_t key_id; /**< The ID of the key to be imported. */ - struct tme_key_policy key_policy;/**< The Key Policy to be associated with the key. */ - struct tme_plaintext_key key_material;/**< The plain-text key material. */ - uint32_t cred_slot; /**< Credential slot to which this key will be bound. */ -} __packed; - -/** - * Broadcast Transport Key structures - */ -struct broadcast_tpkey_req { - uint32_t cbor_header; /**< CBOR encoded tag */ - struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_BROADCAST_TP_KEY */ -} __packed; - - -#endif /* _TME_HWKM_MASTER_INTERFACE_H_ */ - diff --git a/tmecom/tmecom.c b/tmecom/tmecom.c deleted file mode 100644 index b8b694a180..0000000000 --- a/tmecom/tmecom.c +++ /dev/null @@ -1,318 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "tmecom.h" - -struct tmecom { - struct device *dev; - struct mbox_client cl; - struct mbox_chan *chan; - struct mutex lock; - struct qmp_pkt pkt; - wait_queue_head_t waitq; - void *txbuf; - bool rx_done; -}; - -#if IS_ENABLED(CONFIG_DEBUG_FS) -#include -#include - -char dpkt[MBOX_MAX_MSG_LEN + 1]; -struct dentry *debugfs_file; -#endif /* CONFIG_DEBUG_FS */ - -static struct tmecom *tmedev; - -/** - * tmecom_msg_hdr - Request/Response message header between HLOS and TME. - * - * This header is proceeding any request specific parameters. - * The transaction id is used to match request with response. - * - * Note: glink/QMP layer provides the rx/tx data size, so user payload size - * is calculated by reducing the header size. 
- */ -struct tmecom_msg_hdr { - unsigned int reserved; /* for future use */ - unsigned int txnid; /* transaction id */ -} __packed; -#define TMECOM_TX_HDR_SIZE sizeof(struct tmecom_msg_hdr) -#define CBOR_NUM_BYTES (sizeof(unsigned int)) -#define TMECOM_RX_HDR_SIZE (TMECOM_TX_HDR_SIZE + CBOR_NUM_BYTES) - -/* - * CBOR encode emulation - * Prepend tmecom_msg_hdr space - * CBOR tag is prepended in request - */ -static inline size_t tmecom_encode(struct tmecom *tdev, const void *reqbuf, - size_t size) -{ - unsigned int *msg = tdev->txbuf + TMECOM_TX_HDR_SIZE; - unsigned int *src = (unsigned int *)reqbuf; - - memcpy(msg, src, size); - return (size + TMECOM_TX_HDR_SIZE); -} - -/* - * CBOR decode emulation - * Strip tmecom_msg_hdr & CBOR tag - */ -static inline size_t tmecom_decode(struct tmecom *tdev, void *respbuf) -{ - unsigned int *msg = tdev->pkt.data + TMECOM_RX_HDR_SIZE; - unsigned int *rbuf = (unsigned int *)respbuf; - - memcpy(rbuf, msg, (tdev->pkt.size - TMECOM_RX_HDR_SIZE)); - return (tdev->pkt.size - TMECOM_RX_HDR_SIZE); -} - -static bool tmecom_check_rx_done(struct tmecom *tdev) -{ - return tdev->rx_done; -} - -int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf, - size_t *respsize) -{ - struct tmecom *tdev = tmedev; - long time_left = 0; - int ret = 0; - - /* - * Check to handle if probe is not successful or not completed yet - */ - if (!tdev) { - pr_err("%s: tmecom dev is NULL\n", __func__); - return -ENODEV; - } - - if (!reqbuf || !reqsize || (reqsize > MBOX_MAX_MSG_LEN)) { - dev_err(tdev->dev, "invalid reqbuf or reqsize\n"); - return -EINVAL; - } - - if (!respbuf || !respsize || (*respsize > MBOX_MAX_MSG_LEN)) { - dev_err(tdev->dev, "invalid respbuf or respsize\n"); - return -EINVAL; - } - - mutex_lock(&tdev->lock); - - tdev->rx_done = false; - tdev->pkt.size = tmecom_encode(tdev, reqbuf, reqsize); - /* - * Controller expects a 4 byte aligned buffer - */ - tdev->pkt.size = (tdev->pkt.size + 0x3) & ~0x3; - tdev->pkt.data = tdev->txbuf; - - pr_debug("tmecom encoded request size = %u\n", tdev->pkt.size); - print_hex_dump_bytes("tmecom sending bytes : ", - DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size); - - if (mbox_send_message(tdev->chan, &tdev->pkt) < 0) { - dev_err(tdev->dev, "failed to send qmp message\n"); - ret = -EAGAIN; - goto err_exit; - } - - time_left = wait_event_interruptible_timeout(tdev->waitq, - tmecom_check_rx_done(tdev), tdev->cl.tx_tout); - - if (!time_left) { - dev_err(tdev->dev, "request timed out\n"); - ret = -ETIMEDOUT; - goto err_exit; - } - - dev_info(tdev->dev, "response received\n"); - - pr_debug("tmecom received size = %u\n", tdev->pkt.size); - print_hex_dump_bytes("tmecom received bytes : ", - DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size); - - *respsize = tmecom_decode(tdev, respbuf); - - tdev->rx_done = false; - ret = 0; - -err_exit: - mutex_unlock(&tdev->lock); - return ret; -} -EXPORT_SYMBOL(tmecom_process_request); - -#if IS_ENABLED(CONFIG_DEBUG_FS) -static ssize_t tmecom_debugfs_write(struct file *file, - const char __user *userstr, size_t len, loff_t *pos) -{ - int ret = 0; - size_t rxlen = 0; - struct tme_ext_err_info *err_info = (struct tme_ext_err_info *)dpkt; - - - if (!len || (len > MBOX_MAX_MSG_LEN)) { - pr_err("invalid message length\n"); - return -EINVAL; - } - - memset(dpkt, 0, sizeof(*dpkt)); - ret = copy_from_user(dpkt, userstr, len); - if (ret) { - pr_err("%s copy from user failed, ret=%d\n", __func__, ret); - return len; - } - - tmecom_process_request(dpkt, len, dpkt, &rxlen); - - 
print_hex_dump_bytes("tmecom decoded bytes : ", - DUMP_PREFIX_ADDRESS, dpkt, rxlen); - - pr_debug("calling TME_HWKM_CMD_BROADCAST_TP_KEY api\n"); - ret = tme_hwkm_master_broadcast_transportkey(err_info); - - if (ret == 0) - pr_debug("%s successful\n", __func__); - - return len; -} - -static const struct file_operations tmecom_debugfs_ops = { - .open = simple_open, - .write = tmecom_debugfs_write, -}; -#endif /* CONFIG_DEBUG_FS */ - -static void tmecom_receive_message(struct mbox_client *client, void *message) -{ - struct tmecom *tdev = dev_get_drvdata(client->dev); - struct qmp_pkt *pkt = NULL; - - pr_debug("%s entered\n", __func__); - - if (!message) { - dev_err(tdev->dev, "spurious message received\n"); - goto tmecom_receive_end; - } - - if (tdev->rx_done) { - dev_err(tdev->dev, "tmecom response pending\n"); - goto tmecom_receive_end; - } - pkt = (struct qmp_pkt *)message; - tdev->pkt.size = pkt->size; - tdev->pkt.data = pkt->data; - tdev->rx_done = true; -tmecom_receive_end: - wake_up_interruptible(&tdev->waitq); -} - -static int tmecom_probe(struct platform_device *pdev) -{ - struct tmecom *tdev; - const char *label; - char name[32]; - - tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); - if (!tdev) - return -ENOMEM; - - tdev->cl.dev = &pdev->dev; - tdev->cl.tx_block = true; - tdev->cl.tx_tout = 500; - tdev->cl.knows_txdone = false; - tdev->cl.rx_callback = tmecom_receive_message; - - label = of_get_property(pdev->dev.of_node, "mbox-names", NULL); - if (!label) - return -EINVAL; - snprintf(name, 32, "%s_send_message", label); - - tdev->chan = mbox_request_channel(&tdev->cl, 0); - if (IS_ERR(tdev->chan)) { - dev_err(&pdev->dev, "failed to get mbox channel\n"); - return PTR_ERR(tdev->chan); - } - - mutex_init(&tdev->lock); - - if (tdev->chan) { - tdev->txbuf = - devm_kzalloc(&pdev->dev, MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->txbuf) { - dev_err(&pdev->dev, "message buffer alloc faile\n"); - return -ENOMEM; - } - } - - init_waitqueue_head(&tdev->waitq); - -#if IS_ENABLED(CONFIG_DEBUG_FS) - debugfs_file = debugfs_create_file(name, 0220, NULL, tdev, - &tmecom_debugfs_ops); - if (!debugfs_file) - goto err; -#endif /* CONFIG_DEBUG_FS */ - - tdev->rx_done = false; - tdev->dev = &pdev->dev; - dev_set_drvdata(&pdev->dev, tdev); - - tmedev = tdev; - - dev_info(&pdev->dev, "tmecom probe success\n"); - return 0; -err: - mbox_free_channel(tdev->chan); - return -ENOMEM; -} - -static int tmecom_remove(struct platform_device *pdev) -{ - struct tmecom *tdev = platform_get_drvdata(pdev); - -#if IS_ENABLED(CONFIG_DEBUG_FS) - debugfs_remove(debugfs_file); -#endif /* CONFIG_DEBUG_FS */ - - if (tdev->chan) - mbox_free_channel(tdev->chan); - - dev_info(&pdev->dev, "tmecom remove success\n"); - return 0; -} - -static const struct of_device_id tmecom_match_tbl[] = { - {.compatible = "qcom,tmecom-qmp-client"}, - {}, -}; - -static struct platform_driver tmecom_driver = { - .probe = tmecom_probe, - .remove = tmecom_remove, - .driver = { - .name = "tmecom-qmp-client", - .suppress_bind_attrs = true, - .of_match_table = tmecom_match_tbl, - }, -}; -module_platform_driver(tmecom_driver); - -MODULE_DESCRIPTION("MSM TMECom QTI mailbox protocol client"); -MODULE_LICENSE("GPL v2"); diff --git a/tmecom/tmecom.h b/tmecom/tmecom.h deleted file mode 100644 index 54dfb4cd0d..0000000000 --- a/tmecom/tmecom.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
- */ -#ifndef _TMECOM_H_ -#define _TMECOM_H_ - -#define MBOX_MAX_MSG_LEN 1024 - -int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf, - size_t *respsize); -#endif /*_TMECOM_H_ */ From c1f17b2adc7ae66086a9d48592e52e344c52e01e Mon Sep 17 00:00:00 2001 From: Sonal Aggarwal Date: Mon, 21 Feb 2022 15:45:54 +0530 Subject: [PATCH 010/202] securemsm-kernel : Add new functionality in HLOS for TZ to sleep for certain amount of time. --- smcinvoke/smcinvoke.c | 22 +++++++++++++++++++++- smcinvoke/smcinvoke_object.h | 3 ++- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 4f2972e063..729ed813db 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2022, The Linux Foundation. All rights reserved. */ #define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__ @@ -950,6 +950,23 @@ out: return ret; } +static int32_t smcinvoke_sleep(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *msg = buf; + uint32_t sleepTimeMs_val = 0; + + if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) || + (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) { + pr_err("Invalid counts received for sleeping in hlos\n"); + return OBJECT_ERROR_INVALID; + } + + /* Time in miliseconds is expected from tz */ + sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset)); + msleep(sleepTimeMs_val); + return OBJECT_OK; +} + static void process_kernel_obj(void *buf, size_t buf_len) { struct smcinvoke_tzcb_req *cb_req = buf; @@ -961,6 +978,9 @@ static void process_kernel_obj(void *buf, size_t buf_len) case OBJECT_OP_YIELD: cb_req->result = OBJECT_OK; break; + case OBJECT_OP_SLEEP: + cb_req->result = smcinvoke_sleep(buf, buf_len); + break; default: pr_err(" invalid operation for tz kernel object\n"); cb_req->result = OBJECT_ERROR_INVALID; diff --git a/smcinvoke/smcinvoke_object.h b/smcinvoke/smcinvoke_object.h index 620922bfb0..639eb81910 100644 --- a/smcinvoke/smcinvoke_object.h +++ b/smcinvoke/smcinvoke_object.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2022, The Linux Foundation. All rights reserved. */ #ifndef __SMCINVOKE_OBJECT_H #define __SMCINVOKE_OBJECT_H @@ -20,6 +20,7 @@ #define OBJECT_OP_RETAIN (OBJECT_OP_METHOD_MASK - 1) #define OBJECT_OP_MAP_REGION 0 #define OBJECT_OP_YIELD 1 +#define OBJECT_OP_SLEEP 2 #define OBJECT_COUNTS_MAX_BI 0xF #define OBJECT_COUNTS_MAX_BO 0xF From d4014194cc69c0e99c572babe2ff7cca042ab1b7 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Fri, 25 Feb 2022 14:20:50 -0800 Subject: [PATCH 011/202] sec-kernel: qrng: add qrng driver support Add the qrng driver which adds support for the kernel to utilize QTI's PRNG hardware for RNG operations. 
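For reference, a minimal userspace sketch of the ioctl exposed through the new
linux/qrng.h UAPI header. It assumes the driver registers a character device that
routes ioctls to msm_rng_ioctl(); the "/dev/msm_rng" node path is an illustrative
assumption and is not defined by this patch:

    /* Minimal sketch: drop the QRNG interconnect bandwidth vote from userspace. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>

    /* Values mirror the linux/qrng.h UAPI header added by this patch */
    #define QRNG_IOC_MAGIC 0x100
    #define QRNG_IOCTL_RESET_BUS_BANDWIDTH _IO(QRNG_IOC_MAGIC, 1)

    int main(void)
    {
            int fd = open("/dev/msm_rng", O_RDONLY); /* node path assumed */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Ask the driver to set its icc bandwidth vote back to zero */
            if (ioctl(fd, QRNG_IOCTL_RESET_BUS_BANDWIDTH) < 0)
                    perror("QRNG_IOCTL_RESET_BUS_BANDWIDTH");

            close(fd);
            return 0;
    }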
Change-Id: I9c9cb7f907470c88e182dc869f302e4ca83fb798 --- Android.mk | 10 + Kbuild | 5 +- config/sec-kernel_defconfig.conf | 6 + config/ssg_smcinvoke.conf | 8 - linux/qrng.h | 16 + qrng/msm_rng.c | 484 ++++++++++++++++++++++++++++++ securemsm_kernel_product_board.mk | 1 + securemsm_kernel_vendor_board.mk | 1 + 8 files changed, 522 insertions(+), 9 deletions(-) create mode 100644 config/sec-kernel_defconfig.conf delete mode 100644 config/ssg_smcinvoke.conf create mode 100644 linux/qrng.h create mode 100644 qrng/msm_rng.c diff --git a/Android.mk b/Android.mk index cb8f0f94ea..d4fdb204f2 100644 --- a/Android.mk +++ b/Android.mk @@ -69,3 +69,13 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################# +################################################# +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := qrng_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := qrng_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################# \ No newline at end of file diff --git a/Kbuild b/Kbuild index f1b98ce19f..2002ab10fc 100644 --- a/Kbuild +++ b/Kbuild @@ -1,4 +1,4 @@ -include $(SSG_MODULE_ROOT)/config/ssg_smcinvoke.conf +include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ -I$(SSG_MODULE_ROOT)/linux/ @@ -22,3 +22,6 @@ qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o + +obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o +qrng_dlkm-objs := qrng/msm_rng.o \ No newline at end of file diff --git a/config/sec-kernel_defconfig.conf b/config/sec-kernel_defconfig.conf new file mode 100644 index 0000000000..c75cb200d6 --- /dev/null +++ b/config/sec-kernel_defconfig.conf @@ -0,0 +1,6 @@ +export CONFIG_QCOM_SMCINVOKE=m +export CONFIG_QTI_TZ_LOG=m +export CONFIG_CRYPTO_DEV_QCEDEV=m +export CONFIG_CRYPTO_DEV_QCRYPTO=m +export CONFIG_HDCP_QSEECOM=m +export CONFIG_HW_RANDOM_MSM_LEGACY=m diff --git a/config/ssg_smcinvoke.conf b/config/ssg_smcinvoke.conf deleted file mode 100644 index f64b0435db..0000000000 --- a/config/ssg_smcinvoke.conf +++ /dev/null @@ -1,8 +0,0 @@ -export CONFIG_QCOM_SMCINVOKE=m -export CONFIG_QTI_TZ_LOG=m -export CONFIG_CRYPTO_DEV_QCEDEV=m -export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m -export CONFIG_CRYPTO_DEV_QCRYPTO=m -export CONFIG_SCSI_UFS_CRYPTO=m -export CONFIG_SCSI_UFS_CRYPTO_QTI=m -export CONFIG_HDCP_QSEECOM=m \ No newline at end of file diff --git a/linux/qrng.h b/linux/qrng.h new file mode 100644 index 0000000000..09182af814 --- /dev/null +++ b/linux/qrng.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved. + */ +#ifndef _UAPI_QRNG_H_ +#define _UAPI_QRNG_H_ + +#include +#include + +#define QRNG_IOC_MAGIC 0x100 + +#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\ + _IO(QRNG_IOC_MAGIC, 1) + +#endif /* _UAPI_QRNG_H_ */ diff --git a/qrng/msm_rng.c b/qrng/msm_rng.c new file mode 100644 index 0000000000..fa5142a8b2 --- /dev/null +++ b/qrng/msm_rng.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2013, 2015, 2017-2022 The Linux Foundation. All rights + * reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "msm_rng" + +/* Device specific register offsets */ +#define PRNG_DATA_OUT_OFFSET 0x0000 +#define PRNG_STATUS_OFFSET 0x0004 +#define PRNG_LFSR_CFG_OFFSET 0x0100 +#define PRNG_CONFIG_OFFSET 0x0104 + +/* Device specific register masks and config values */ +#define PRNG_LFSR_CFG_MASK 0xFFFF0000 +#define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD +#define PRNG_CONFIG_MASK 0xFFFFFFFD +#define PRNG_HW_ENABLE 0x00000002 + +#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */ +#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */ + +#define RETRY_MAX_CNT 5 /* max retry times to read register */ +#define RETRY_DELAY_INTERVAL 440 /* retry delay interval in us */ + +struct msm_rng_device { + struct platform_device *pdev; + void __iomem *base; + struct clk *prng_clk; + struct mutex rng_lock; + struct icc_path *icc_path; +}; + +static struct msm_rng_device msm_rng_device_info; +static struct msm_rng_device *msm_rng_dev_cached; +static struct mutex cached_rng_lock; +static long msm_rng_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + + switch (cmd) { + case QRNG_IOCTL_RESET_BUS_BANDWIDTH: + pr_debug("calling msm_rng_bus_scale(LOW)\n"); + ret = icc_set_bw(msm_rng_device_info.icc_path, 0, 0); + if (ret) + pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret); + break; + default: + pr_err("Unsupported IOCTL call\n"); + break; + } + return ret; +} + +/* + * + * This function calls hardware random bit generator directory and retuns it + * back to caller + * + */ +static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, + void *data, size_t max) +{ + struct platform_device *pdev; + void __iomem *base; + size_t currsize = 0; + u32 val = 0; + u32 *retdata = data; + int ret; + int failed = 0; + + pdev = msm_rng_dev->pdev; + base = msm_rng_dev->base; + + /* no room for word data */ + if (max < 4) + return 0; + + mutex_lock(&msm_rng_dev->rng_lock); + + if (msm_rng_dev->icc_path) { + ret = icc_set_bw(msm_rng_dev->icc_path, 0, 300000); + if (ret) { + pr_err("bus_scale_client_update_req failed\n"); + goto bus_err; + } + } + /* enable PRNG clock */ + if (msm_rng_dev->prng_clk) { + ret = clk_prepare_enable(msm_rng_dev->prng_clk); + if (ret) { + pr_err("failed to enable prng clock\n"); + goto err; + } + } + /* read random data from h/w */ + do { + /* check status bit if data is available */ + if (!(readl_relaxed(base + PRNG_STATUS_OFFSET) + & 0x00000001)) { + if (failed++ == RETRY_MAX_CNT) { + if (currsize == 0) + pr_err("Data not available\n"); + break; + } + udelay(RETRY_DELAY_INTERVAL); + } else { + + /* read FIFO */ + val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET); + + /* write data back to callers pointer */ + *(retdata++) = val; + currsize += 4; + /* make sure we stay on 32bit boundary */ + if ((max - currsize) < 4) + break; + } + + } while (currsize < max); + + /* vote to turn off clock */ + if (msm_rng_dev->prng_clk) + clk_disable_unprepare(msm_rng_dev->prng_clk); +err: + if (msm_rng_dev->icc_path) { + ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0); + if (ret) + pr_err("bus_scale_client_update_req failed\n"); + } +bus_err: + mutex_unlock(&msm_rng_dev->rng_lock); + + val = 0L; + return currsize; +} +static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct msm_rng_device *msm_rng_dev; + int rv = 0; 
+ + msm_rng_dev = (struct msm_rng_device *)rng->priv; + rv = msm_rng_direct_read(msm_rng_dev, data, max); + + return rv; +} + + +static struct hwrng msm_rng = { + .name = DRIVER_NAME, + .read = msm_rng_read, + .quality = 1024, +}; + +static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) +{ + unsigned long val = 0; + unsigned long reg_val = 0; + int ret = 0; + + if (msm_rng_dev->icc_path) { + ret = icc_set_bw(msm_rng_dev->icc_path, 0, 30000); + if (ret) + pr_err("bus_scale_client_update_req failed\n"); + } + /* Enable the PRNG CLK */ + if (msm_rng_dev->prng_clk) { + ret = clk_prepare_enable(msm_rng_dev->prng_clk); + if (ret) { + dev_err(&(msm_rng_dev->pdev)->dev, + "failed to enable clock in probe\n"); + return -EPERM; + } + } + + /* Enable PRNG h/w only if it is NOT ON */ + val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) & + PRNG_HW_ENABLE; + /* PRNG H/W is not ON */ + if (val != PRNG_HW_ENABLE) { + val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); + val &= PRNG_LFSR_CFG_MASK; + val |= PRNG_LFSR_CFG_CLOCKS; + writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET); + + /* The PRNG CONFIG register should be first written */ + mb(); + + reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) + & PRNG_CONFIG_MASK; + reg_val |= PRNG_HW_ENABLE; + writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET); + + /* The PRNG clk should be disabled only after we enable the + * PRNG h/w by writing to the PRNG CONFIG register. + */ + mb(); + } + if (msm_rng_dev->prng_clk) + clk_disable_unprepare(msm_rng_dev->prng_clk); + + if (msm_rng_dev->icc_path) { + ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0); + if (ret) + pr_err("bus_scale_client_update_req failed\n"); + } + + return 0; +} + +static const struct file_operations msm_rng_fops = { + .unlocked_ioctl = msm_rng_ioctl, +}; +static struct class *msm_rng_class; +static struct cdev msm_rng_cdev; + +static int msm_rng_probe(struct platform_device *pdev) +{ + struct resource *res; + struct msm_rng_device *msm_rng_dev = NULL; + void __iomem *base = NULL; + bool configure_qrng = true; + int error = 0; + struct device *dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "invalid address\n"); + error = -EFAULT; + goto err_exit; + } + + msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL); + if (!msm_rng_dev) { + error = -ENOMEM; + goto err_exit; + } + + base = ioremap(res->start, resource_size(res)); + if (!base) { + dev_err(&pdev->dev, "ioremap failed\n"); + error = -ENOMEM; + goto err_iomap; + } + msm_rng_dev->base = base; + + /* create a handle for clock control */ + if (pdev->dev.of_node) { + if (of_property_read_bool(pdev->dev.of_node, + "qcom,no-clock-support")) + msm_rng_dev->prng_clk = NULL; + else + msm_rng_dev->prng_clk = clk_get(&pdev->dev, + "km_clk_src"); + } + + if (IS_ERR(msm_rng_dev->prng_clk)) { + dev_err(&pdev->dev, "failed to register clock source\n"); + error = -ENODEV; + goto err_clk_get; + } + + /* save away pdev and register driver data */ + msm_rng_dev->pdev = pdev; + platform_set_drvdata(pdev, msm_rng_dev); + + if (pdev->dev.of_node) { + msm_rng_dev->icc_path = of_icc_get(&pdev->dev, "data_path"); + msm_rng_device_info.icc_path = msm_rng_dev->icc_path; + if (IS_ERR(msm_rng_dev->icc_path)) { + error = PTR_ERR(msm_rng_dev->icc_path); + dev_err(&pdev->dev, "get icc path err %d\n", error); + goto err_icc_get; + } + } + + /* Enable rng h/w for the targets which can access the entire + * address space of PRNG. 
+ */ + if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node, + "qcom,no-qrng-config"))) + configure_qrng = false; + if (configure_qrng) { + error = msm_rng_enable_hw(msm_rng_dev); + if (error) + goto err_icc_get; + } + + mutex_init(&msm_rng_dev->rng_lock); + mutex_init(&cached_rng_lock); + + /* register with hwrng framework */ + msm_rng.priv = (unsigned long) msm_rng_dev; + error = hwrng_register(&msm_rng); + if (error) { + dev_err(&pdev->dev, "failed to register hwrng\n"); + goto err_reg_hwrng; + } + error = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops); + if (error) { + dev_err(&pdev->dev, "failed to register chrdev\n"); + goto err_reg_chrdev; + } + + msm_rng_class = class_create(THIS_MODULE, "msm-rng"); + if (IS_ERR(msm_rng_class)) { + pr_err("class_create failed\n"); + error = PTR_ERR(msm_rng_class); + goto err_create_cls; + } + + dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0), + NULL, "msm-rng"); + if (IS_ERR(dev)) { + pr_err("Device create failed\n"); + error = PTR_ERR(dev); + goto err_create_dev; + } + cdev_init(&msm_rng_cdev, &msm_rng_fops); + msm_rng_dev_cached = msm_rng_dev; + return error; + +err_create_dev: + class_destroy(msm_rng_class); +err_create_cls: + unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME); +err_reg_chrdev: + hwrng_unregister(&msm_rng); +err_reg_hwrng: + if (msm_rng_dev->icc_path) + icc_put(msm_rng_dev->icc_path); +err_icc_get: + if (msm_rng_dev->prng_clk) + clk_put(msm_rng_dev->prng_clk); +err_clk_get: + iounmap(msm_rng_dev->base); +err_iomap: + kfree_sensitive(msm_rng_dev); +err_exit: + return error; +} + +static int msm_rng_remove(struct platform_device *pdev) +{ + struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev); + + unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME); + hwrng_unregister(&msm_rng); + if (msm_rng_dev->prng_clk) + clk_put(msm_rng_dev->prng_clk); + iounmap(msm_rng_dev->base); + platform_set_drvdata(pdev, NULL); + if (msm_rng_dev->icc_path) + icc_put(msm_rng_dev->icc_path); + + kfree_sensitive(msm_rng_dev); + msm_rng_dev_cached = NULL; + return 0; +} + +static int qrng_get_random(struct crypto_rng *tfm, const u8 *src, + unsigned int slen, u8 *rdata, + unsigned int dlen) +{ + int sizeread = 0; + int rv = -EFAULT; + + if (!msm_rng_dev_cached) { + pr_err("%s: msm_rng_dev is not initialized\n", __func__); + rv = -ENODEV; + goto err_exit; + } + + if (!rdata) { + pr_err("%s: data buffer is null\n", __func__); + rv = -EINVAL; + goto err_exit; + } + + if (signal_pending(current) || + mutex_lock_interruptible(&cached_rng_lock)) { + pr_err("%s: mutex lock interrupted\n", __func__); + rv = -ERESTARTSYS; + goto err_exit; + } + sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen); + + if (sizeread == dlen) + rv = 0; + + mutex_unlock(&cached_rng_lock); +err_exit: + return rv; + +} + +static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) +{ + return 0; +} + +static struct rng_alg rng_algs[] = { { + .generate = qrng_get_random, + .seed = qrng_reset, + .seedsize = 0, + .base = { + .cra_name = "qrng", + .cra_driver_name = "fips_hw_qrng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + } +} }; + +static const struct of_device_id qrng_match[] = { + {.compatible = "qcom,msm-rng"}, + {}, +}; + +static struct platform_driver rng_driver = { + .probe = msm_rng_probe, + .remove = msm_rng_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = qrng_match, + }, +}; + +static int __init msm_rng_init(void) +{ + int ret; + + msm_rng_dev_cached = 
NULL; + ret = platform_driver_register(&rng_driver); + if (ret) { + pr_err("%s: platform_driver_register error:%d\n", + __func__, ret); + goto err_exit; + } + ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); + if (ret) { + pr_err("%s: crypto_register_algs error:%d\n", + __func__, ret); + goto err_exit; + } + +err_exit: + return ret; +} + +module_init(msm_rng_init); + +static void __exit msm_rng_exit(void) +{ + crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); + platform_driver_unregister(&rng_driver); +} + +module_exit(msm_rng_exit); + +MODULE_DESCRIPTION("QTI MSM Random Number Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 556281513c..4592a703d7 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -5,6 +5,7 @@ PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qrng_dlkm.ko \ diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index 841a8885b6..de877a7fb3 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -4,4 +4,5 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + $(KERNEL_MODULES_OUT)/qrng_dlkm.ko \ From 4fa9a50df282efa5309cc8b3bc295fe2178b7935 Mon Sep 17 00:00:00 2001 From: Joseph Oh Date: Fri, 4 Mar 2022 18:18:15 -0800 Subject: [PATCH 012/202] securemsm: Fix QCEDEV module compilation Fix the qcedev module compilation. --- Kbuild | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Kbuild b/Kbuild index 2002ab10fc..6448a830e6 100644 --- a/Kbuild +++ b/Kbuild @@ -14,14 +14,14 @@ tz_log_dlkm-objs := tz_log/tz_log.o obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o qce50_dlkm-objs := crypto-qti/qce50.o -obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qcedev-mod_dlkm.o +obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcedev-mod_dlkm.o qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o -qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o +qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o -qrng_dlkm-objs := qrng/msm_rng.o \ No newline at end of file +qrng_dlkm-objs := qrng/msm_rng.o From 5f29ef701b6e1a11e773ced1b90065446e56e499 Mon Sep 17 00:00:00 2001 From: Ashish Pratap Singh Bhadoria Date: Wed, 9 Mar 2022 12:35:31 +0530 Subject: [PATCH 013/202] securemsm-kernel: Correction in LF Copyright --- smcinvoke/smcinvoke.c | 3 ++- smcinvoke/smcinvoke_object.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 729ed813db..afed095af0 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #define pr_fmt(fmt) "smcinvoke: %s: " fmt, __func__ diff --git a/smcinvoke/smcinvoke_object.h b/smcinvoke/smcinvoke_object.h index 639eb81910..74005ab22e 100644 --- a/smcinvoke/smcinvoke_object.h +++ b/smcinvoke/smcinvoke_object.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2016-2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SMCINVOKE_OBJECT_H #define __SMCINVOKE_OBJECT_H From 1496aefc9e02ef610da872f15b5046ca6a290830 Mon Sep 17 00:00:00 2001 From: Phalguni Bumhyavarapu Date: Wed, 9 Mar 2022 15:55:32 -0800 Subject: [PATCH 014/202] securemsm-kernel : Correction in LF Copyright --- hdcp/hdcp_qseecom.c | 1 + linux/hdcp_qseecom.h | 1 + 2 files changed, 2 insertions(+) diff --git a/hdcp/hdcp_qseecom.c b/hdcp/hdcp_qseecom.c index f97ff22fcc..7c72602243 100644 --- a/hdcp/hdcp_qseecom.c +++ b/hdcp/hdcp_qseecom.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015-2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #define pr_fmt(fmt) "[hdcp-qseecom] %s: " fmt, __func__ diff --git a/linux/hdcp_qseecom.h b/linux/hdcp_qseecom.h index bc5eef6acc..ebcddbf2a9 100644 --- a/linux/hdcp_qseecom.h +++ b/linux/hdcp_qseecom.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015-2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __HDCP_QSEECOM_H From 7398cbd4c4273662677123dc3da86b51f387c271 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Wed, 16 Mar 2022 15:21:16 -0700 Subject: [PATCH 015/202] securemsm-kernel : Correction in LF Copyright for qrng. Change-Id: I1f0b5a2b39a1bd0207a9cedcb85f5ef53e843e86 --- linux/qrng.h | 3 ++- qrng/msm_rng.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/linux/qrng.h b/linux/qrng.h index 09182af814..a56119bcd4 100644 --- a/linux/qrng.h +++ b/linux/qrng.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _UAPI_QRNG_H_ #define _UAPI_QRNG_H_ diff --git a/qrng/msm_rng.c b/qrng/msm_rng.c index fa5142a8b2..b33e37b988 100644 --- a/qrng/msm_rng.c +++ b/qrng/msm_rng.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2011-2013, 2015, 2017-2022 The Linux Foundation. All rights + * Copyright (c) 2011-2013, 2015, 2017-2021 The Linux Foundation. All rights * reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include From 31689a70c872136fab35fb2685be210d490becdc Mon Sep 17 00:00:00 2001 From: Griffin Stamp Date: Tue, 29 Mar 2022 14:44:29 -0700 Subject: [PATCH 016/202] securemsm-kernel: compile only smcinvoke on LE Change-Id: I13c0794a621f62165a2e6cb0eb1eb117325ee097 --- Kbuild | 4 ++++ config/sec-kernel_defconfig_tvm.conf | 6 ++++++ 2 files changed, 10 insertions(+) create mode 100644 config/sec-kernel_defconfig_tvm.conf diff --git a/Kbuild b/Kbuild index 6448a830e6..37a32fe0a1 100644 --- a/Kbuild +++ b/Kbuild @@ -1,4 +1,8 @@ +ifeq ($(CONFIG_ARCH_QTI_VM), y) +include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_tvm.conf +else include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf +endif LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ -I$(SSG_MODULE_ROOT)/linux/ diff --git a/config/sec-kernel_defconfig_tvm.conf b/config/sec-kernel_defconfig_tvm.conf new file mode 100644 index 0000000000..9c1ad4c4f2 --- /dev/null +++ b/config/sec-kernel_defconfig_tvm.conf @@ -0,0 +1,6 @@ +export CONFIG_QCOM_SMCINVOKE=m +export CONFIG_QTI_TZ_LOG=n +export CONFIG_CRYPTO_DEV_QCEDEV=n +export CONFIG_CRYPTO_DEV_QCRYPTO=n +export CONFIG_HDCP_QSEECOM=n +export CONFIG_HW_RANDOM_MSM_LEGACY=n From abb78cf97c68f1c73237e4977c0ddd869ee8c7eb Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Tue, 5 Apr 2022 19:16:42 -0700 Subject: [PATCH 017/202] securemsm-kernel: Fix product_packages entry Fix product_package entry for module list. Change-Id: I48acc82fca94fc2f8c4547d5d1c600e62fa8c114 --- securemsm_kernel_product_board.mk | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 4592a703d7..7b15db3408 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -1,11 +1,12 @@ #Build ssg kernel driver -PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ - $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ - $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ - $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ - $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ - $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ - $(KERNEL_MODULES_OUT)/qrng_dlkm.ko \ + +PRODUCT_PACKAGES += smcinvoke_dlkm.ko +PRODUCT_PACKAGES += tz_log_dlkm.ko +PRODUCT_PACKAGES += qcedev-mod_dlkm.ko +PRODUCT_PACKAGES += qce50_dlkm.ko +PRODUCT_PACKAGES += qcrypto-msm_dlkm.ko +PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko +PRODUCT_PACKAGES += qrng_dlkm.ko From 5387ff08842fdb34d3aaf2a0d7a7f08f75428ecb Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Tue, 5 Apr 2022 18:52:41 -0700 Subject: [PATCH 018/202] smcinvoke: Upgrade smcinvoke to latest pull commits smcinvoke: Release lock for non critical region When entering into non critical region make sure to release the lock and increase reference for memobj instead of holding the lock and causing memobj not to be released when third party module using it. smcinvoke: Add support for splitbin Remove hard limit on the number of splitbins available. Let the TZ decide if the reassembled binary is good for usage. 
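For illustration (not part of the diff below): the locking change follows the usual kernel pattern of pinning an object with a reference before dropping the lock around a slow call, then retaking the lock and dropping the reference. A minimal sketch of that pattern is shown here; struct mem_obj_example, mem_obj_example_release() and do_slow_work_unlocked() are made-up names standing in for the mem_obj handling in smcinvoke_map_mem_region(), and the slow call is only a placeholder for the shmbridge creation.

#include <linux/kref.h>
#include <linux/mutex.h>

struct mem_obj_example {
	struct kref ref;
	/* ... payload ... */
};

static void mem_obj_example_release(struct kref *kref)
{
	/* Last reference dropped: free the embedding object here */
}

static int do_slow_work_unlocked(struct mutex *lock,
				 struct mem_obj_example *obj)
{
	int ret;

	/* Caller holds *lock: pin obj so it stays valid while the lock is dropped */
	kref_get(&obj->ref);
	mutex_unlock(lock);

	ret = 0;	/* placeholder for the slow call, e.g. bridge creation */

	mutex_lock(lock);
	/* May free obj if we held the last reference; do not touch it afterwards */
	kref_put(&obj->ref, mem_obj_example_release);
	return ret;
}
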
cherrypicked from I6d7b5c3154c8c362be0a6bad2da1c4687191536d Change-Id: Ibb12bc906fb3e995928a0b51b742e8193d737ba3 --- smcinvoke/smcinvoke.c | 97 +++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 41 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index afed095af0..75b2937c7a 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -29,9 +29,9 @@ #include #include #include +#include "misc/qseecom_kernel.h" #include "smcinvoke.h" #include "smcinvoke_object.h" -#include "misc/qseecom_kernel.h" #define CREATE_TRACE_POINTS #include "trace_smcinvoke.h" @@ -372,11 +372,23 @@ static uint32_t next_mem_map_obj_id_locked(void) static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) { + int ret = 0; + bool is_bridge_created_by_others = mem_obj->bridge_created_by_others; + struct dma_buf *dmabuf_to_free = mem_obj->dma_buf; + uint64_t shmbridge_handle = mem_obj->shmbridge_handle; + list_del(&mem_obj->list); - dma_buf_put(mem_obj->dma_buf); - if (!mem_obj->bridge_created_by_others) - qtee_shmbridge_deregister(mem_obj->shmbridge_handle); kfree(mem_obj); + mem_obj = NULL; + mutex_unlock(&g_smcinvoke_lock); + + if (!is_bridge_created_by_others) + ret = qtee_shmbridge_deregister(shmbridge_handle); + if (ret) + pr_err("Error:%d delete bridge failed leaking memory 0x%x\n", + ret, dmabuf_to_free); + else + dma_buf_put(dmabuf_to_free); } static void del_mem_regn_obj_locked(struct kref *kref) @@ -931,11 +943,36 @@ static int32_t smcinvoke_map_mem_region(void *buf, size_t buf_len) pr_err("invalid physical address, ret: %d\n", ret); goto out; } + + /* Increase reference count as we are feeding the memobj to + * smcinvoke and unlock the mutex. No need to hold the mutex in + * case of shmbridge creation. + */ + kref_get(&mem_obj->mem_map_obj_ref_cnt); + mutex_unlock(&g_smcinvoke_lock); + ret = smcinvoke_create_bridge(mem_obj); + + /* Take lock again and decrease the reference count which we + * increased for shmbridge but before proceeding further we + * have to check again if the memobj is still valid or not + * after decreasing the reference. 
+ */ + mutex_lock(&g_smcinvoke_lock); + kref_put(&mem_obj->mem_map_obj_ref_cnt, del_mem_map_obj_locked); + if (ret) { ret = OBJECT_ERROR_INVALID; goto out; } + + if (!find_mem_obj_locked(TZHANDLE_GET_OBJID(msg->args[1].handle), + SMCINVOKE_MEM_RGN_OBJ)) { + mutex_unlock(&g_smcinvoke_lock); + pr_err("Memory object not found\n"); + return OBJECT_ERROR_BADOBJ; + } + mem_obj->mem_map_obj_id = next_mem_map_obj_id_locked(); } else { kref_get(&mem_obj->mem_map_obj_ref_cnt); @@ -951,23 +988,6 @@ out: return ret; } -static int32_t smcinvoke_sleep(void *buf, size_t buf_len) -{ - struct smcinvoke_tzcb_req *msg = buf; - uint32_t sleepTimeMs_val = 0; - - if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) || - (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) { - pr_err("Invalid counts received for sleeping in hlos\n"); - return OBJECT_ERROR_INVALID; - } - - /* Time in miliseconds is expected from tz */ - sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset)); - msleep(sleepTimeMs_val); - return OBJECT_OK; -} - static void process_kernel_obj(void *buf, size_t buf_len) { struct smcinvoke_tzcb_req *cb_req = buf; @@ -979,9 +999,6 @@ static void process_kernel_obj(void *buf, size_t buf_len) case OBJECT_OP_YIELD: cb_req->result = OBJECT_OK; break; - case OBJECT_OP_SLEEP: - cb_req->result = smcinvoke_sleep(buf, buf_len); - break; default: pr_err(" invalid operation for tz kernel object\n"); cb_req->result = OBJECT_ERROR_INVALID; @@ -2123,12 +2140,13 @@ char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, stru { int rc = 0; - const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entry07 = NULL; + const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entrylast = NULL; char fw_name[MAX_APP_NAME_SIZE] = "\0"; int num_images = 0, phi = 0; unsigned char app_arch = 0; u8 *img_data_ptr = NULL; - size_t offset[8], bufferOffset = 0, phdr_table_offset = 0; + size_t bufferOffset = 0, phdr_table_offset = 0; + size_t *offset = NULL; Elf32_Phdr phdr32; Elf64_Phdr phdr64; struct elf32_hdr *ehdr = NULL; @@ -2146,15 +2164,13 @@ char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, stru app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS); /*Get the offsets for split images header*/ - offset[0] = 0; if (app_arch == ELFCLASS32) { ehdr = (struct elf32_hdr *)fw_entry00->data; num_images = ehdr->e_phnum; - if (num_images != 8) { - pr_err("Number of images :%d is not valid\n", num_images); + offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL); + if (offset == NULL) goto release_fw_entry00; - } phdr_table_offset = (size_t) ehdr->e_phoff; for (phi = 1; phi < num_images; ++phi) { bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr); @@ -2166,10 +2182,9 @@ char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, stru ehdr64 = (struct elf64_hdr *)fw_entry00->data; num_images = ehdr64->e_phnum; - if (num_images != 8) { - pr_err("Number of images :%d is not valid\n", num_images); + offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL); + if (offset == NULL) goto release_fw_entry00; - } phdr_table_offset = (size_t) ehdr64->e_phoff; for (phi = 1; phi < num_images; ++phi) { bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr); @@ -2185,23 +2200,22 @@ char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, stru /*Find the size of last split bin image*/ snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images-1); - rc = firmware_request_nowarn(&fw_entry07, fw_name, class_dev); + rc = 
firmware_request_nowarn(&fw_entrylast, fw_name, class_dev); if (rc) { pr_err("Failed to locate blob %s\n", fw_name); goto release_fw_entry00; } /*Total size of image will be the offset of last image + the size of last split image*/ - *fw_size = fw_entry07->size + offset[num_images-1]; + *fw_size = fw_entrylast->size + offset[num_images-1]; /*Allocate memory for the buffer that will hold the split image*/ rc = qtee_shmbridge_allocate_shm((*fw_size), shm); if (rc) { pr_err("smbridge alloc failed for size: %zu\n", *fw_size); - goto release_fw_entry07; + goto release_fw_entrylast; } img_data_ptr = shm->vaddr; - /* * Copy contents of split bins to the buffer */ @@ -2213,18 +2227,19 @@ char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, stru pr_err("Failed to locate blob %s\n", fw_name); qtee_shmbridge_free_shm(shm); img_data_ptr = NULL; - goto release_fw_entry07; + goto release_fw_entrylast; } memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size); release_firmware(fw_entry); fw_entry = NULL; } - memcpy(img_data_ptr + offset[phi], fw_entry07->data, fw_entry07->size); + memcpy(img_data_ptr + offset[phi], fw_entrylast->data, fw_entrylast->size); -release_fw_entry07: - release_firmware(fw_entry07); +release_fw_entrylast: + release_firmware(fw_entrylast); release_fw_entry00: release_firmware(fw_entry00); + kfree(offset); return img_data_ptr; } EXPORT_SYMBOL(firmware_request_from_smcinvoke); From 177899dde3b1b246d90acd2aa292dabb1232d876 Mon Sep 17 00:00:00 2001 From: Sheik Anwar Shabic Y Date: Mon, 21 Mar 2022 14:09:02 +0530 Subject: [PATCH 019/202] securemsm-kernel : Resolve external dependancy for hdcp module Resolve external dependancy for hdcp symbols. Change-Id: If4bad51a7aabdbb334f9e7efaa45f40f0988c857 --- Android.mk | 42 +++++++--- Kbuild | 5 +- config/sec-kernel_defconfig.h | 11 +++ hdcp/hdcp_qseecom.c | 2 +- linux/hdcp_qseecom.h | 154 ---------------------------------- 5 files changed, 44 insertions(+), 170 deletions(-) create mode 100644 config/sec-kernel_defconfig.h delete mode 100644 linux/hdcp_qseecom.h diff --git a/Android.mk b/Android.mk index d4fdb204f2..f50ab6fd44 100644 --- a/Android.mk +++ b/Android.mk @@ -3,12 +3,29 @@ LOCAL_PATH := $(call my-dir) DLKM_DIR := $(TOP)/device/qcom/common/dlkm +SEC_KERNEL_DIR := $(TOP)/vendor/qcom/opensource/securemsm-kernel + SSG_SRC_FILES := \ $(wildcard $(LOCAL_PATH)/*) \ $(wildcard $(LOCAL_PATH)/*/*) \ $(wildcard $(LOCAL_PATH)/*/*/*) \ $(wildcard $(LOCAL_PATH)/*/*/*/*) +# This is set once per LOCAL_PATH, not per (kernel) module +KBUILD_OPTIONS := SSG_ROOT=$(SEC_KERNEL_DIR) +KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM) + +################################################### +include $(CLEAR_VARS) +# For incremental compilation +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := sec-module-symvers +LOCAL_MODULE_STEM := Module.symvers +LOCAL_MODULE_KBUILD_NAME := Module.symvers +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +################################################### +################################################### #$(error $(SSG_SRC_FILES)) include $(CLEAR_VARS) #LOCAL_SRC_FILES := $(SSG_SRC_FILES) @@ -19,7 +36,8 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################## +################################################### 
+################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := tz_log_dlkm.ko @@ -28,8 +46,8 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# -################################################## +################################################### +################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := qce50_dlkm.ko @@ -38,8 +56,8 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# -################################################## +################################################### +################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := qcedev-mod_dlkm.ko @@ -48,8 +66,8 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# -################################################## +################################################### +################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := qcrypto-msm_dlkm.ko @@ -58,8 +76,8 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# -################################################# +################################################### +################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := hdcp_qseecom_dlkm.ko @@ -68,8 +86,8 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# -################################################# +################################################### +################################################### include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := qrng_dlkm.ko @@ -78,4 +96,4 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -################################################# \ No newline at end of file +################################################### diff --git a/Kbuild b/Kbuild index 37a32fe0a1..3828f354c2 100644 --- a/Kbuild +++ b/Kbuild @@ -5,9 +5,8 @@ include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf endif LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ - -I$(SSG_MODULE_ROOT)/linux/ - -KBUILD_CPPFLAGS += -DCONFIG_HDCP_QSEECOM + -I$(SSG_MODULE_ROOT)/linux/ \ + -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o diff --git a/config/sec-kernel_defconfig.h b/config/sec-kernel_defconfig.h new file mode 100644 index 0000000000..4cf5f029dc --- /dev/null +++ b/config/sec-kernel_defconfig.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: 
GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.. + */ + +#define CONFIG_QCOM_SMCINVOKE 1 +#define CONFIG_QTI_TZ_LOG 1 +#define CONFIG_CRYPTO_DEV_QCEDEV 1 +#define CONFIG_CRYPTO_DEV_QCRYPTO 1 +#define CONFIG_HDCP_QSEECOM 1 +#define CONFIG_HW_RANDOM_MSM_LEGACY 1 diff --git a/hdcp/hdcp_qseecom.c b/hdcp/hdcp_qseecom.c index 7c72602243..a3f8ebf2fb 100644 --- a/hdcp/hdcp_qseecom.c +++ b/hdcp/hdcp_qseecom.c @@ -22,7 +22,7 @@ #include #include #include -#include "linux/hdcp_qseecom.h" +#include #include "misc/qseecom_kernel.h" #define HDCP2P2_APP_NAME "hdcp2p2" diff --git a/linux/hdcp_qseecom.h b/linux/hdcp_qseecom.h deleted file mode 100644 index ebcddbf2a9..0000000000 --- a/linux/hdcp_qseecom.h +++ /dev/null @@ -1,154 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015-2022, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. - */ - -#ifndef __HDCP_QSEECOM_H -#define __HDCP_QSEECOM_H -#include - -#define HDCP_QSEECOM_ENUM_STR(x) #x - -enum hdcp2_app_cmd { - HDCP2_CMD_START, - HDCP2_CMD_START_AUTH, - HDCP2_CMD_STOP, - HDCP2_CMD_PROCESS_MSG, - HDCP2_CMD_TIMEOUT, - HDCP2_CMD_EN_ENCRYPTION, - HDCP2_CMD_QUERY_STREAM, -}; - -struct hdcp2_buffer { - unsigned char *data; - u32 length; -}; - -struct hdcp2_app_data { - u32 timeout; - bool repeater_flag; - struct hdcp2_buffer request; // requests to TA, sent from sink - struct hdcp2_buffer response; // responses from TA, sent to sink -}; - -struct hdcp1_topology { - uint32_t depth; - uint32_t device_count; - uint32_t max_devices_exceeded; - uint32_t max_cascade_exceeded; - uint32_t hdcp2LegacyDeviceDownstream; - uint32_t hdcp1DeviceDownstream; -}; - -static inline const char *hdcp2_app_cmd_str(enum hdcp2_app_cmd cmd) -{ - switch (cmd) { - case HDCP2_CMD_START: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START); - case HDCP2_CMD_START_AUTH: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH); - case HDCP2_CMD_STOP: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP); - case HDCP2_CMD_PROCESS_MSG: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_PROCESS_MSG); - case HDCP2_CMD_TIMEOUT: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_TIMEOUT); - case HDCP2_CMD_EN_ENCRYPTION: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_EN_ENCRYPTION); - case HDCP2_CMD_QUERY_STREAM: - return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_QUERY_STREAM); - default: return "???"; - } -} - -#if IS_ENABLED(CONFIG_HDCP_QSEECOM) -void *hdcp1_init(void); -void hdcp1_deinit(void *data); -bool hdcp1_feature_supported(void *data); -int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb); -int hdcp1_set_enc(void *data, bool enable); -int hdcp1_ops_notify(void *data, void *topology, bool is_authenticated); -void hdcp1_stop(void *data); - -void *hdcp2_init(u32 device_type); -void hdcp2_deinit(void *ctx); -bool hdcp2_feature_supported(void *ctx); -int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, - struct hdcp2_app_data *app_data); -int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, - uint8_t stream_number, uint32_t *stream_id); -int hdcp2_close_stream(void *ctx, uint32_t stream_id); -int hdcp2_force_encryption(void *ctx, uint32_t enable); -#else -static inline void *hdcp1_init(void) -{ - return NULL; -} - -static inline void hdcp1_deinit(void *data) -{ -} - -static inline bool hdcp1_feature_supported(void *data) -{ - return false; -} - -static inline int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb) -{ - return 0; -} - -static inline int hdcp1_ops_notify(void *data, void 
*topology, bool is_authenticated) -{ - return 0; -} - -static inline int hdcp1_set_enc(void *data, bool enable) -{ - return 0; -} - -static inline void hdcp1_stop(void *data) -{ -} - -static inline void *hdcp2_init(u32 device_type) -{ - return NULL; -} - -static inline void hdcp2_deinit(void *ctx) -{ -} - -static inline bool hdcp2_feature_supported(void *ctx) -{ - return false; -} - -static inline int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, - struct hdcp2_app_data *app_data) -{ - return 0; -} - -static inline int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, - uint8_t stream_number, uint32_t *stream_id) -{ - return 0; -} - -static inline int hdcp2_close_stream(void *ctx, uint32_t stream_id) -{ - return 0; -} - -static inline int hdcp2_force_encryption(void *ctx, uint32_t enable) -{ - return 0; -} -#endif /* CONFIG_HDCP_QSEECOM */ - -#endif /* __HDCP_QSEECOM_H */ From 31f097f98882a29fa4b93fb7d017538da9366a38 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Tue, 1 Mar 2022 10:29:43 -0800 Subject: [PATCH 020/202] crypto-qti: qcedev: add support for hlos offload path Add support for HLOS offload data path in the qcedev driver mainly to support DRM and HDCP usecases. Changes extend the current driver to support the following. - Register multiple pipes for different offload usecases. - Report timer expiry errors back to userspace. - Support different iv CTR sizes based on userspace input. - Support new IOCTLS to support encryption, decryption and copy offload usecases for DRM and HDCP. Change-Id: Ie9b74c173d0afd7b8c863ed57a68ec6e74baa9b4 --- Kbuild | 2 +- crypto-qti/qce.h | 21 +- crypto-qti/qce50.c | 572 +++++++++++++++++++++++++++++--------- crypto-qti/qce50.h | 28 +- crypto-qti/qcedev.c | 446 +++++++++++++++++++++++++++-- crypto-qti/qcedevi.h | 4 +- crypto-qti/qcryptohw_50.h | 7 + linux/qcedev.h | 93 +++++++ linux/qcota.h | 2 + 9 files changed, 1014 insertions(+), 161 deletions(-) diff --git a/Kbuild b/Kbuild index 3828f354c2..7d70810490 100644 --- a/Kbuild +++ b/Kbuild @@ -17,7 +17,7 @@ tz_log_dlkm-objs := tz_log/tz_log.o obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o qce50_dlkm-objs := crypto-qti/qce50.o -obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcedev-mod_dlkm.o +obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h index c3d06b8739..f7f8e9863c 100644 --- a/crypto-qti/qce.h +++ b/crypto-qti/qce.h @@ -105,6 +105,14 @@ enum qce_req_op_enum { QCE_REQ_LAST }; +/* Offload operation type */ +enum qce_offload_op_enum { + QCE_OFFLOAD_HLOS_HLOS = 1, + QCE_OFFLOAD_HLOS_CPB = 2, + QCE_OFFLOAD_CPB_HLOS = 3, + QCE_OFFLOAD_OPER_LAST +}; + /* Algorithms/features supported in CE HW engine */ struct ce_hw_support { bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/ @@ -147,6 +155,7 @@ struct qce_sha_req { unsigned int size; /* data length in bytes */ void *areq; unsigned int flags; + int current_req_info; }; struct qce_req { @@ -168,10 +177,17 @@ struct qce_req { unsigned int encklen; /* cipher key length */ unsigned char *iv; /* initialization vector */ unsigned int ivsize; /* initialization vector size*/ + unsigned int iv_ctr_size; /* iv increment counter size*/ unsigned int cryptlen; /* data length */ unsigned int use_pmem; /* is source of data PMEM allocated? 
*/ struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/ unsigned int flags; + enum qce_offload_op_enum offload_op; /* Offload usecase */ + bool is_pattern_valid; /* Is pattern setting required */ + unsigned int pattern_info; /* Pattern info for offload operation */ + unsigned int block_offset; /* partial first block for AES CTR */ + bool is_copy_op; /* copy buffers without crypto ops */ + int current_req_info; }; struct qce_pm_table { @@ -192,5 +208,8 @@ int qce_disable_clk(void *handle); void qce_get_driver_stats(void *handle); void qce_clear_driver_stats(void *handle); void qce_dump_req(void *handle); - +void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2, + unsigned int *s3, unsigned int *s4, + unsigned int *s5, unsigned int *s6); +int qce_manage_timeout(void *handle, int req_info); #endif /* __CRYPTO_MSM_QCE_H */ diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 427e8b6e3f..b303d239ea 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -36,7 +36,7 @@ #define CRYPTO_SMMU_IOVA_START 0x10000000 #define CRYPTO_SMMU_IOVA_SIZE 0x40000000 -#define CRYPTO_CONFIG_RESET 0xE01EF +#define CRYPTO_CONFIG_RESET 0xE001F #define MAX_SPS_DESC_FIFO_SIZE 0xfff0 #define QCE_MAX_NUM_DSCR 0x200 #define QCE_SECTOR_SIZE 0x200 @@ -84,6 +84,9 @@ static LIST_HEAD(qce50_bam_list); #define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec)) +#define AES_CTR_IV_CTR_SIZE 64 +#define EXPECTED_STATUS1_REG_VAL 0x2000006 + enum qce_owner { QCE_OWNER_NONE = 0, QCE_OWNER_CLIENT = 1, @@ -156,6 +159,8 @@ struct qce_device { struct dma_iommu_mapping *smmu_mapping; bool enable_s1_smmu; bool no_clock_support; + bool kernel_pipes_support; + bool offload_pipes_support; }; static void print_notify_debug(struct sps_event_notify *notify); @@ -175,6 +180,112 @@ static uint32_t _std_init_vector_sha256[] = { 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; +/* + * Requests for offload operations do not require explicit dma operations + * as they already have SMMU mapped source/destination buffers. 
+ */ +static bool is_offload_op(int op) +{ + return (op == QCE_OFFLOAD_HLOS_HLOS || op == QCE_OFFLOAD_HLOS_CPB || + op == QCE_OFFLOAD_CPB_HLOS); +} + +static uint32_t qce_get_config_be(struct qce_device *pce_dev, + uint32_t pipe_pair) +{ + uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1; + + return (beats << CRYPTO_REQ_SIZE | + BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) | + BIT(CRYPTO_MASK_OP_DONE_INTR) | 0 << CRYPTO_HIGH_SPD_EN_N | + pipe_pair << CRYPTO_PIPE_SET_SELECT); +} + +static void dump_status_regs(unsigned int s1, unsigned int s2,unsigned int s3, + unsigned int s4, unsigned int s5,unsigned int s6) +{ + pr_err("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, s1); + pr_err("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, s2); + pr_err("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, s3); + pr_err("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, s4); + pr_err("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, s5); + pr_err("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, s6); +} + +void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2, + unsigned int *s3, unsigned int *s4, + unsigned int *s5, unsigned int *s6) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + *s1 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + *s2 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS2_REG); + *s3 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS3_REG); + *s4 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS4_REG); + *s5 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS5_REG); + *s6 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS6_REG); + +#ifdef QCE_DEBUG + dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6); +#else + if (*s1 != EXPECTED_STATUS1_REG_VAL) + dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6); +#endif + return; +} +EXPORT_SYMBOL(qce_get_crypto_status); + +static void qce_set_offload_config(struct qce_device *pce_dev, + struct qce_req *creq) +{ + uint32_t config_be = pce_dev->reg.crypto_cfg_be; + + switch (creq->offload_op) { + case QCE_OFFLOAD_HLOS_HLOS: + config_be = qce_get_config_be(pce_dev, + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS]); + break; + case QCE_OFFLOAD_HLOS_CPB: + config_be = qce_get_config_be(pce_dev, + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB]); + break; + case QCE_OFFLOAD_CPB_HLOS: + config_be = qce_get_config_be(pce_dev, + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS]); + break; + default: + break; + } + + pce_dev->reg.crypto_cfg_be = config_be; + pce_dev->reg.crypto_cfg_le = (config_be | + CRYPTO_LITTLE_ENDIAN_MASK); + return; +} + +/* + * IV counter mask is be set based on the values sent through the offload ioctl + * calls. Currently for offload operations, it is 64 bytes of mask for AES CTR, + * and 128 bytes of mask for AES CBC. 
+ */ +static void qce_set_iv_ctr_mask(struct qce_device *pce_dev, + struct qce_req *creq) +{ + if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) { + pce_dev->reg.encr_cntr_mask_0 = 0x0; + pce_dev->reg.encr_cntr_mask_1 = 0x0; + pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF; + } else { + pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF; + } + + return; +} + static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b, unsigned int len) { @@ -725,12 +836,21 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, uint32_t ivsize = creq->ivsize; int i; struct sps_command_element *pce = NULL; + bool is_des_cipher = false; if (creq->mode == QCE_MODE_XTS) key_size = creq->encklen/2; else key_size = creq->encklen; + qce_set_offload_config(pce_dev, creq); + + pce = cmdlistinfo->crypto_cfg; + pce->data = pce_dev->reg.crypto_cfg_be; + + pce = cmdlistinfo->crypto_cfg_le; + pce->data = pce_dev->reg.crypto_cfg_le; + pce = cmdlistinfo->go_proc; if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) { use_hw_key = true; @@ -739,7 +859,6 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, QCRYPTO_CTX_USE_PIPE_KEY) use_pipe_key = true; } - pce = cmdlistinfo->go_proc; if (use_hw_key) pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG + pce_dev->phy_iobase); @@ -857,6 +976,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, pce++; pce->data = enckey32[1]; } + is_des_cipher = true; break; case CIPHER_ALG_3DES: if (creq->mode != QCE_MODE_ECB) { @@ -877,6 +997,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, for (i = 0; i < 6; i++, pce++) pce->data = enckey32[i]; } + is_des_cipher = true; break; case CIPHER_ALG_AES: default: @@ -971,6 +1092,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE; } + if (use_hw_key) encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR); else @@ -979,10 +1101,14 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, /* write encr seg size */ pce = cmdlistinfo->encr_seg_size; + if (creq->is_copy_op) { + pce->data = 0; + } else { if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) pce->data = (creq->cryptlen + creq->authsize); else pce->data = creq->cryptlen; + } /* write encr seg start */ pce = cmdlistinfo->encr_seg_start; @@ -992,6 +1118,41 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, pce = cmdlistinfo->seg_size; pce->data = totallen_in; + if (is_offload_op(creq->offload_op)) { + /* pattern info */ + pce = cmdlistinfo->pattern_info; + if (creq->is_pattern_valid) + pce->data = creq->pattern_info; + + /* block offset */ + pce = cmdlistinfo->block_offset; + pce->data = (creq->block_offset << 4) | + (creq->block_offset ? 
1: 0); + + /* IV counter size */ + qce_set_iv_ctr_mask(pce_dev, creq); + } + + if (!is_des_cipher) { + pce = cmdlistinfo->encr_mask_3; + pce->data = pce_dev->reg.encr_cntr_mask_3; + pce = cmdlistinfo->encr_mask_2; + pce->data = pce_dev->reg.encr_cntr_mask_2; + pce = cmdlistinfo->encr_mask_1; + pce->data = pce_dev->reg.encr_cntr_mask_1; + pce = cmdlistinfo->encr_mask_0; + pce->data = pce_dev->reg.encr_cntr_mask_0; + } + + pce = cmdlistinfo->go_proc; + pce->data = 0; + if (is_offload_op(creq->offload_op)) + pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT)); + else + pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) | + (1 << CRYPTO_RESULTS_DUMP)); + + return 0; } @@ -1128,11 +1289,11 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; iovec = pce_sps_data->in_transfer.iovec; - pr_info("==============================================\n"); - pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); - pr_info("==============================================\n"); + pr_err("==============================================\n"); + pr_err("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); + pr_err("==============================================\n"); for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) { - pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, + pr_err(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); if (iovec->flags & cmd_flags) { struct sps_command_element *pced; @@ -1141,7 +1302,7 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) (GET_VIRT_ADDR(iovec->addr)); ents = iovec->size/(sizeof(struct sps_command_element)); for (j = 0; j < ents; j++) { - pr_info(" [%d] [0x%x] 0x%x\n", j, + pr_err(" [%d] [0x%x] 0x%x\n", j, pced->addr, pced->data); pced++; } @@ -1149,9 +1310,9 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) iovec++; } - pr_info("==============================================\n"); - pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); - pr_info("==============================================\n"); + pr_err("==============================================\n"); + pr_err("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); + pr_err("==============================================\n"); iovec = pce_sps_data->out_transfer.iovec; for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) { pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, @@ -1567,8 +1728,11 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev, /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); - QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + - CRYPTO_CONFIG_REG)); + qce_set_offload_config(pce_dev, creq); + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, + (pce_dev->iobase + CRYPTO_CONFIG_REG)); + QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, + (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* * Ensure previous instructions (setting the CONFIG register) * was completed before issuing starting to set other config register @@ -1837,25 +2001,34 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG); } + /* write pattern */ + if (creq->is_pattern_valid) + QCE_WRITE_REG(creq->pattern_info, pce_dev->iobase + + CRYPTO_DATA_PATT_PROC_CFG_REG); + + /* write block offset to CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG? */ + QCE_WRITE_REG(((creq->block_offset << 4) | + (creq->block_offset ? 
1 : 0)), + pce_dev->iobase + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG); + /* write encr seg start */ QCE_WRITE_REG((coffset & 0xffff), pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG); /* write encr counter mask */ - QCE_WRITE_REG(0xffffffff, + qce_set_iv_ctr_mask(pce_dev, creq); + QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_3, pce_dev->iobase + CRYPTO_CNTR_MASK_REG); - QCE_WRITE_REG(0xffffffff, - pce_dev->iobase + CRYPTO_CNTR_MASK_REG0); - QCE_WRITE_REG(0xffffffff, - pce_dev->iobase + CRYPTO_CNTR_MASK_REG1); - QCE_WRITE_REG(0xffffffff, + QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_2, pce_dev->iobase + CRYPTO_CNTR_MASK_REG2); + QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_1, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG1); + QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_0, + pce_dev->iobase + CRYPTO_CNTR_MASK_REG0); /* write seg size */ QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); - QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase + - CRYPTO_CONFIG_REG)); /* issue go to crypto */ if (!use_hw_key) { QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -2042,11 +2215,12 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info) int rc = 0; struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info [req_info].ce_sps; + uint16_t op = pce_dev->ce_request_info[req_info].offload_op; if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr) return rc; - rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe, + rc = sps_transfer_one(pce_dev->ce_bam_info.consumer[op].pipe, GET_PHYS_ADDR( pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist), 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK)); @@ -2060,6 +2234,34 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info) static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, bool is_complete); +int qce_manage_timeout(void *handle, int req_info) +{ + int rc = 0; + struct qce_device *pce_dev = (struct qce_device *) handle; + struct skcipher_request *areq; + struct ce_request_info *preq_info; + qce_comp_func_ptr_t qce_callback; + uint16_t op = pce_dev->ce_request_info[req_info].offload_op; + + preq_info = &pce_dev->ce_request_info[req_info]; + qce_callback = preq_info->qce_cb; + areq = (struct skcipher_request *) preq_info->areq; + + pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info, op); + rc = _qce_unlock_other_pipes(pce_dev, req_info); + if (rc) + pr_err("%s: fail unlock other pipes, rc = %d", __func__, rc); + qce_free_req_info(pce_dev, req_info, true); + qce_callback(areq, NULL, NULL, 0); + sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, + pce_dev->ce_bam_info.dest_pipe_index[op]); + sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, + pce_dev->ce_bam_info.src_pipe_index[op]); + + return rc; +} +EXPORT_SYMBOL(qce_manage_timeout); + static int _aead_complete(struct qce_device *pce_dev, int req_info) { struct aead_request *areq; @@ -2260,13 +2462,16 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) pce_sps_data = &preq_info->ce_sps; qce_callback = preq_info->qce_cb; areq = (struct skcipher_request *) preq_info->areq; - if (areq->src != areq->dst) { - qce_dma_unmap_sg(pce_dev->pdev, areq->dst, - preq_info->dst_nents, DMA_FROM_DEVICE); - } - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, - (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + + if (!is_offload_op(preq_info->offload_op)) { + if (areq->src != areq->dst) + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, + preq_info->dst_nents, DMA_FROM_DEVICE); + qce_dma_unmap_sg(pce_dev->pdev, areq->src, + preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + } if (_qce_unlock_other_pipes(pce_dev, req_info)) { qce_free_req_info(pce_dev, req_info, true); @@ -2276,12 +2481,16 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) result_dump_status = be32_to_cpu(pce_sps_data->result->status); pce_sps_data->result->status = 0; - if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) - | (1 << CRYPTO_HSD_ERR))) { - pr_err("ablk_cipher operation error. Status %x\n", + if (!is_offload_op(preq_info->offload_op)) { + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | + (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) { + pr_err("ablk_cipher operation error. Status %x\n", result_dump_status); - result_status = -ENXIO; - } else if (pce_sps_data->consumer_status | + result_status = -ENXIO; + } + } + + if (pce_sps_data->consumer_status | pce_sps_data->producer_status) { pr_err("ablk_cipher sps operation error. sps status %x %x\n", pce_sps_data->consumer_status, @@ -2579,6 +2788,7 @@ static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info) { int rc = 0; struct ce_sps_data *pce_sps_data; + uint16_t op = pce_dev->ce_request_info[req_info].offload_op; pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; pce_sps_data->out_transfer.user = @@ -2590,20 +2800,20 @@ static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info) _qce_dump_descr_fifos_dbg(pce_dev, req_info); if (pce_sps_data->in_transfer.iovec_count) { - rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe, + rc = sps_transfer(pce_dev->ce_bam_info.consumer[op].pipe, &pce_sps_data->in_transfer); if (rc) { - pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.consumer.pipe, + pr_err("sps_xfr() fail (cons pipe=0x%lx) rc = %d\n", + (uintptr_t)pce_dev->ce_bam_info.consumer[op].pipe, rc); goto ret; } } - rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, + rc = sps_transfer(pce_dev->ce_bam_info.producer[op].pipe, &pce_sps_data->out_transfer); if (rc) pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc); + (uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc); ret: if (rc) _qce_dump_descr_fifos(pce_dev, req_info); @@ -2625,6 +2835,7 @@ ret: * * @pce_dev - Pointer to qce_device structure * @ep - Pointer to sps endpoint data structure + * @index - Points to crypto use case * @is_produce - 1 means Producer endpoint * 0 means Consumer endpoint * @@ -2633,6 +2844,7 @@ ret: */ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, struct qce_sps_ep_conn_data *ep, + int index, bool is_producer) { int rc = 0; @@ -2686,12 +2898,13 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, /* Producer pipe index */ sps_connect_info->src_pipe_index = - pce_dev->ce_bam_info.src_pipe_index; + pce_dev->ce_bam_info.src_pipe_index[index]; /* Consumer pipe index */ sps_connect_info->dest_pipe_index = - pce_dev->ce_bam_info.dest_pipe_index; + pce_dev->ce_bam_info.dest_pipe_index[index]; /* Set pipe group */ - sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index; + sps_connect_info->lock_group = + pce_dev->ce_bam_info.pipe_pair_index[index]; sps_connect_info->event_thresh = 0x10; /* * Max. 
no of scatter/gather buffers that can @@ -2941,7 +3154,7 @@ ret: */ static int qce_sps_init(struct qce_device *pce_dev) { - int rc = 0; + int rc = 0, i = 0; rc = qce_sps_get_bam(pce_dev); if (rc) @@ -2949,14 +3162,20 @@ static int qce_sps_init(struct qce_device *pce_dev) pr_debug("BAM device registered. bam_handle=0x%lx\n", pce_dev->ce_bam_info.bam_handle); - rc = qce_sps_init_ep_conn(pce_dev, - &pce_dev->ce_bam_info.producer, true); - if (rc) - goto sps_connect_producer_err; - rc = qce_sps_init_ep_conn(pce_dev, - &pce_dev->ce_bam_info.consumer, false); - if (rc) - goto sps_connect_consumer_err; + for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { + if (i == 0 && !(pce_dev->kernel_pipes_support)) + continue; + else if ((i > 0) && !(pce_dev->offload_pipes_support)) + break; + rc = qce_sps_init_ep_conn(pce_dev, + &pce_dev->ce_bam_info.producer[i], i, true); + if (rc) + goto sps_connect_producer_err; + rc = qce_sps_init_ep_conn(pce_dev, + &pce_dev->ce_bam_info.consumer[i], i, false); + if (rc) + goto sps_connect_consumer_err; + } pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n", (unsigned long long)pce_dev->ce_bam_info.bam_mem, @@ -2964,7 +3183,7 @@ static int qce_sps_init(struct qce_device *pce_dev) return rc; sps_connect_consumer_err: - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer[i]); sps_connect_producer_err: qce_sps_release_bam(pce_dev); return rc; @@ -3124,6 +3343,7 @@ static void _sps_producer_callback(struct sps_event_notify *notify) unsigned int req_info; struct ce_sps_data *pce_sps_data; struct ce_request_info *preq_info; + uint16_t op; print_notify_debug(notify); @@ -3140,24 +3360,29 @@ static void _sps_producer_callback(struct sps_event_notify *notify) } preq_info = &pce_dev->ce_request_info[req_info]; + op = pce_dev->ce_request_info[req_info].offload_op; pce_sps_data = &preq_info->ce_sps; if ((preq_info->xfer_type == QCE_XFER_CIPHERING || preq_info->xfer_type == QCE_XFER_AEAD) && pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) { pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; - pce_sps_data->out_transfer.iovec_count = 0; - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + if (!is_offload_op(op)) { + pce_sps_data->out_transfer.iovec_count = 0; + _qce_sps_add_data(GET_PHYS_ADDR( + pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); - _qce_set_flag(&pce_sps_data->out_transfer, + _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); - rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, - &pce_sps_data->out_transfer); - if (rc) { - pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.producer.pipe, + rc = sps_transfer( + pce_dev->ce_bam_info.producer[op].pipe, + &pce_sps_data->out_transfer); + if (rc) { + pr_err("sps_xfr fail (prod pipe=0x%lx) rc = %d\n", + (uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc); + } } return; } @@ -3179,8 +3404,18 @@ static void _sps_producer_callback(struct sps_event_notify *notify) */ static void qce_sps_exit(struct qce_device *pce_dev) { - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer); - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); + int i = 0; + + for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { + if (i == 0 && !(pce_dev->kernel_pipes_support)) + continue; + else if ((i > 0) && !(pce_dev->offload_pipes_support)) + break; + qce_sps_exit_ep_conn(pce_dev, + &pce_dev->ce_bam_info.consumer[i]); + qce_sps_exit_ep_conn(pce_dev, + 
&pce_dev->ce_bam_info.producer[i]); + } qce_sps_release_bam(pce_dev); } @@ -3301,6 +3536,11 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, /* clear status register */ qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS2_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS3_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS4_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS5_REG, 0, NULL); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS6_REG, 0, NULL); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg); @@ -3314,15 +3554,20 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, &pcl_info->encr_seg_start); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG, - (uint32_t)0xffffffff, &pcl_info->encr_mask); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0, - (uint32_t)0xffffffff, NULL); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1, - (uint32_t)0xffffffff, NULL); + pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2, - (uint32_t)0xffffffff, NULL); + pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1, + pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0, + pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, &pcl_info->auth_seg_cfg); + qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_DATA_PATT_PROC_CFG_REG, 0, + &pcl_info->pattern_info); + qce_add_cmd_element(pdev, &ce_vaddr, + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG, 0, + &pcl_info->block_offset); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0, &pcl_info->encr_key); for (i = 1; i < key_reg; i++) @@ -3359,7 +3604,7 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, 0, &pcl_info->auth_seg_size); } qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -3473,7 +3718,7 @@ static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index, } qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -3695,7 +3940,7 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index, 0, NULL); } qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -3908,7 +4153,7 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev, &pcl_info->auth_seg_start); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -3980,13 +4225,13 @@ static int 
_setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, &pcl_info->encr_seg_start); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG, - (uint32_t)0xffffffff, &pcl_info->encr_mask); + pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0, - (uint32_t)0xffffffff, NULL); + pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1, - (uint32_t)0xffffffff, NULL); + pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2, - (uint32_t)0xffffffff, NULL); + pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, auth_cfg, &pcl_info->auth_seg_cfg); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0, @@ -4041,7 +4286,7 @@ static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, 0, NULL); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -4127,7 +4372,7 @@ static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index, qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0, NULL); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -4209,7 +4454,7 @@ static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index, qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, - pdev->reg.crypto_cfg_le, NULL); + pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le); qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | @@ -4396,13 +4641,9 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev) static int qce_init_ce_cfg_val(struct qce_device *pce_dev) { - uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1; - uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index; + uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index[0]; - pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) | - BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) | - BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) | - (pipe_pair << CRYPTO_PIPE_SET_SELECT); + pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair); pce_dev->reg.crypto_cfg_le = (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK); @@ -4565,6 +4806,13 @@ static int qce_init_ce_cfg_val(struct qce_device *pce_dev) pce_dev->reg.auth_cfg_snow3g = (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) | BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST); + + /* Initialize IV counter mask values */ + pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF; + pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF; + return 0; } @@ -4701,6 +4949,7 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + q_req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; 
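The hunks above replace the hard-coded 0xffffffff counter-mask writes with the per-device encr_cntr_mask_0..3 values that qce_init_ce_cfg_val() seeds to all-ones and that qce_set_iv_ctr_mask() can narrow per request. The body of qce_set_iv_ctr_mask() is not part of this excerpt; the sketch below is only a hedged illustration that assumes the IV-bit-to-register split documented in the qce50.h hunk further down (REG0 covers IV bits 127:96, CRYPTO_CNTR_MASK_REG covers bits 31:0) and the 64-bit AES_CTR_IV_CTR_SIZE constant, not the driver's actual code.

/*
 * Hedged sketch only: relies on the qce50 driver's internal types
 * (struct qce_device, struct qce_req) and on the register/bit split
 * documented in qce50.h; not the driver's real implementation.
 */
static void example_set_iv_ctr_mask(struct qce_device *pce_dev,
				    struct qce_req *creq)
{
	if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) {
		/* 64-bit counter: only IV bits 63:0 increment */
		pce_dev->reg.encr_cntr_mask_0 = 0x0;		/* bits 127:96 */
		pce_dev->reg.encr_cntr_mask_1 = 0x0;		/* bits 95:64  */
		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;	/* bits 63:32  */
		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;	/* bits 31:0   */
	} else {
		/* Default: the full 128-bit counter increments */
		pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
	}
}

Whatever the real helper does, the values it leaves in pce_dev->reg are what the CRYPTO_CNTR_MASK_REG* register writes and cmdlist elements shown above pick up.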
@@ -4880,15 +5129,22 @@ static int _qce_suspend(void *handle) { struct qce_device *pce_dev = (struct qce_device *)handle; struct sps_pipe *sps_pipe_info; + int i = 0; if (handle == NULL) return -ENODEV; - sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; - sps_disconnect(sps_pipe_info); + for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { + if (i == 0 && !(pce_dev->kernel_pipes_support)) + continue; + else if ((i > 0) && !(pce_dev->offload_pipes_support)) + break; + sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe; + sps_disconnect(sps_pipe_info); - sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; - sps_disconnect(sps_pipe_info); + sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe; + sps_disconnect(sps_pipe_info); + } return 0; } @@ -4898,32 +5154,41 @@ static int _qce_resume(void *handle) struct qce_device *pce_dev = (struct qce_device *)handle; struct sps_pipe *sps_pipe_info; struct sps_connect *sps_connect_info; - int rc; + int rc, i; if (handle == NULL) return -ENODEV; - sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; - sps_connect_info = &pce_dev->ce_bam_info.consumer.connect; - memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); - rc = sps_connect(sps_pipe_info, sps_connect_info); - if (rc) { - pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", + for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { + if (i == 0 && !(pce_dev->kernel_pipes_support)) + continue; + else if ((i > 0) && !(pce_dev->offload_pipes_support)) + break; + sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe; + sps_connect_info = &pce_dev->ce_bam_info.consumer[i].connect; + memset(sps_connect_info->desc.base, 0x00, + sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n", (uintptr_t)sps_pipe_info, rc); - return rc; - } - sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; - sps_connect_info = &pce_dev->ce_bam_info.producer.connect; - memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); - rc = sps_connect(sps_pipe_info, sps_connect_info); - if (rc) - pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", + return rc; + } + sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe; + sps_connect_info = &pce_dev->ce_bam_info.producer[i].connect; + memset(sps_connect_info->desc.base, 0x00, + sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) + pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n", (uintptr_t)sps_pipe_info, rc); - rc = sps_register_event(sps_pipe_info, - &pce_dev->ce_bam_info.producer.event); - if (rc) - pr_err("Producer callback registration failed rc = %d\n", rc); + rc = sps_register_event(sps_pipe_info, + &pce_dev->ce_bam_info.producer[i].event); + if (rc) + pr_err("Producer cb registration failed rc = %d\n", + rc); + } return rc; } @@ -4951,6 +5216,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req) req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + q_req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; areq = (struct aead_request *) q_req->areq; @@ -5124,6 +5390,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + c_req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; @@ -5133,12 +5400,16 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) /* cipher input */ 
preq_info->src_nents = count_sg(areq->src, areq->cryptlen); - qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, - (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : - DMA_TO_DEVICE); + if (!is_offload_op(c_req->offload_op)) + qce_dma_map_sg(pce_dev->pdev, areq->src, + preq_info->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + /* cipher output */ if (areq->src != areq->dst) { preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen); + if (!is_offload_op(c_req->offload_op)) qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, DMA_FROM_DEVICE); } else { @@ -5172,6 +5443,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) goto bad; preq_info->mode = c_req->mode; + preq_info->offload_op = c_req->offload_op; /* setup for client callback, and issue command to BAM */ preq_info->areq = areq; @@ -5185,7 +5457,7 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) if (pce_dev->support_cmd_dscr && cmdlistinfo) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, &pce_sps_data->in_transfer); - if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen, + if (_qce_sps_add_data(areq->src->dma_address, areq->cryptlen, &pce_sps_data->in_transfer)) goto bad; _qce_set_flag(&pce_sps_data->in_transfer, @@ -5196,16 +5468,17 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); - if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->cryptlen, + if (_qce_sps_add_data(areq->dst->dma_address, areq->cryptlen, &pce_sps_data->out_transfer)) goto bad; if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) { pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; - if (_qce_sps_add_data( + if (!is_offload_op(c_req->offload_op)) + if (_qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer)) - goto bad; + goto bad; } else { pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; } @@ -5218,18 +5491,19 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) return 0; bad: - if (areq->src != areq->dst) { - if (preq_info->dst_nents) { - qce_dma_unmap_sg(pce_dev->pdev, areq->dst, - preq_info->dst_nents, DMA_FROM_DEVICE); - } - } - if (preq_info->src_nents) { - qce_dma_unmap_sg(pce_dev->pdev, areq->src, + if (!is_offload_op(c_req->offload_op)) { + if (areq->src != areq->dst) + if (preq_info->dst_nents) + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, + preq_info->dst_nents, DMA_FROM_DEVICE); + + if (preq_info->src_nents) + qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } + qce_free_req_info(pce_dev, req_info, false); return rc; } @@ -5257,6 +5531,7 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) return -EBUSY; } + sreq->current_req_info = req_info; areq = (struct ahash_request *)sreq->areq; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; @@ -5349,6 +5624,7 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; @@ -5472,6 +5748,7 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; @@ -5579,6 +5856,7 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, req_info = qce_alloc_req_info(pce_dev); if (req_info < 0) return -EBUSY; + req->current_req_info = req_info; preq_info = &pce_dev->ce_request_info[req_info]; pce_sps_data = &preq_info->ce_sps; switch (req->algorithm) { @@ -5648,7 +5926,7 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, struct qce_device *pce_dev) { struct resource *resource; - int rc = 0; + int rc = 0, i = 0; pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node, "qcom,ce-hw-shared"); @@ -5680,12 +5958,39 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, pce_dev->request_bw_before_clk = of_property_read_bool( (&pdev->dev)->of_node, "qcom,request-bw-before-clk"); + pce_dev->kernel_pipes_support = true; if (of_property_read_u32((&pdev->dev)->of_node, "qcom,bam-pipe-pair", - &pce_dev->ce_bam_info.pipe_pair_index)) { - pr_err("Fail to get bam pipe pair information.\n"); - return -EINVAL; + &pce_dev->ce_bam_info.pipe_pair_index[0])) { + pr_warn("Kernel pipes not supported.\n"); + //Unused pipe, just as failsafe. 
+ pce_dev->ce_bam_info.pipe_pair_index[0] = 2; + pce_dev->kernel_pipes_support = false; } + + if (of_property_read_bool((&pdev->dev)->of_node, + "qcom,offload-ops-support")) { + pce_dev->offload_pipes_support = true; + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,bam-pipe-offload-cpb-hlos", + &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS])) { + pr_err("Fail to get bam offload cpb-hlos pipe pair info.\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,bam-pipe-offload-hlos-hlos", + &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS])) { + pr_err("Fail to get bam offload hlos-hlos info.\n"); + return -EINVAL; + } + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,bam-pipe-offload-hlos-cpb", + &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB])) { + pr_err("Fail to get bam offload hlos-cpb info\n"); + return -EINVAL; + } + } + if (of_property_read_u32((&pdev->dev)->of_node, "qcom,ce-device", &pce_dev->ce_bam_info.ce_device)) { @@ -5717,10 +6022,13 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node, "qcom,no-clock-support"); - pce_dev->ce_bam_info.dest_pipe_index = - 2 * pce_dev->ce_bam_info.pipe_pair_index; - pce_dev->ce_bam_info.src_pipe_index = - pce_dev->ce_bam_info.dest_pipe_index + 1; + for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { + /* Source/destination pipes for all usecases */ + pce_dev->ce_bam_info.dest_pipe_index[i] = + 2 * pce_dev->ce_bam_info.pipe_pair_index[i]; + pce_dev->ce_bam_info.src_pipe_index[i] = + pce_dev->ce_bam_info.dest_pipe_index[i] + 1; + } resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crypto-base"); @@ -6055,9 +6363,9 @@ void *qce_open(struct platform_device *pdev, int *rc) qce_init_ce_cfg_val(pce_dev); *rc = qce_sps_init(pce_dev); - if (*rc == 0) - qce_setup_ce_sps_data(pce_dev); - *rc = 0; + if (*rc) + goto err; + qce_setup_ce_sps_data(pce_dev); qce_disable_clk(pce_dev); setup_dummy_req(pce_dev); atomic_set(&pce_dev->no_of_queued_req, 0); diff --git a/crypto-qti/qce50.h b/crypto-qti/qce50.h index f1e9b6827d..e679df095c 100644 --- a/crypto-qti/qce50.h +++ b/crypto-qti/qce50.h @@ -71,6 +71,7 @@ struct qce_cmdlist_info { unsigned long cmdlist; struct sps_command_element *crypto_cfg; + struct sps_command_element *crypto_cfg_le; struct sps_command_element *encr_seg_cfg; struct sps_command_element *encr_seg_size; struct sps_command_element *encr_seg_start; @@ -78,8 +79,13 @@ struct qce_cmdlist_info { struct sps_command_element *encr_xts_key; struct sps_command_element *encr_cntr_iv; struct sps_command_element *encr_ccm_cntr_iv; - struct sps_command_element *encr_mask; + struct sps_command_element *encr_mask_0; + struct sps_command_element *encr_mask_1; + struct sps_command_element *encr_mask_2; + struct sps_command_element *encr_mask_3; struct sps_command_element *encr_xts_du_size; + struct sps_command_element *pattern_info; + struct sps_command_element *block_offset; struct sps_command_element *auth_seg_cfg; struct sps_command_element *auth_seg_size; @@ -170,6 +176,15 @@ struct qce_ce_cfg_reg_setting { uint32_t auth_cfg_aead_sha256_hmac; uint32_t auth_cfg_kasumi; uint32_t auth_cfg_snow3g; + + /* iv0 - bits 127:96 - CRYPTO_CNTR_MASK_REG0*/ + uint32_t encr_cntr_mask_0; + /* iv1 - bits 95:64 - CRYPTO_CNTR_MASK_REG1*/ + uint32_t encr_cntr_mask_1; + /* iv2 - bits 63:32 - CRYPTO_CNTR_MASK_REG2*/ + uint32_t encr_cntr_mask_2; + /* iv3 - bits 31:0 - CRYPTO_CNTR_MASK_REG*/ + uint32_t 
encr_cntr_mask_3; }; struct ce_bam_info { @@ -179,14 +194,14 @@ struct ce_bam_info { uint32_t ce_device; uint32_t ce_hw_instance; uint32_t bam_ee; - unsigned int pipe_pair_index; - unsigned int src_pipe_index; - unsigned int dest_pipe_index; + unsigned int pipe_pair_index[QCE_OFFLOAD_OPER_LAST]; + unsigned int src_pipe_index[QCE_OFFLOAD_OPER_LAST]; + unsigned int dest_pipe_index[QCE_OFFLOAD_OPER_LAST]; unsigned long bam_handle; int ce_burst_size; uint32_t minor_version; - struct qce_sps_ep_conn_data producer; - struct qce_sps_ep_conn_data consumer; + struct qce_sps_ep_conn_data producer[QCE_OFFLOAD_OPER_LAST]; + struct qce_sps_ep_conn_data consumer[QCE_OFFLOAD_OPER_LAST]; }; /* SPS data structure with buffers, commandlists & commmand pointer lists */ @@ -227,6 +242,7 @@ struct ce_request_info { dma_addr_t phy_ota_dst; unsigned int ota_size; unsigned int req_len; + unsigned int offload_op; }; struct qce_driver_stats { diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 9280d500c6..660b054df1 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -34,8 +34,11 @@ #include -#define CACHE_LINE_SIZE 32 +#define CACHE_LINE_SIZE 64 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE +#define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024) +/* Max wait time once a crypt o request is done */ +#define MAX_CRYPTO_WAIT_TIME 1500 static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, @@ -50,6 +53,13 @@ static uint8_t _std_init_vector_sha256_uint8[] = { 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 }; +#define QCEDEV_CTX_KEY_MASK 0x000000ff +#define QCEDEV_CTX_USE_HW_KEY 0x00000001 +#define QCEDEV_CTX_USE_PIPE_KEY 0x00000002 + +#define QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK 0x000000FF +#define QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK 0x00000003 + static DEFINE_MUTEX(send_cmd_lock); static DEFINE_MUTEX(qcedev_sent_bw_req); static DEFINE_MUTEX(hash_access_lock); @@ -184,8 +194,12 @@ exit_unlock_mutex: static int qcedev_open(struct inode *inode, struct file *file); static int qcedev_release(struct inode *inode, struct file *file); -static int start_cipher_req(struct qcedev_control *podev); -static int start_sha_req(struct qcedev_control *podev); +static int start_cipher_req(struct qcedev_control *podev, + int *current_req_info); +static int start_offload_cipher_req(struct qcedev_control *podev, + int *current_req_info); +static int start_sha_req(struct qcedev_control *podev, + int *current_req_info); static const struct file_operations qcedev_fops = { .owner = THIS_MODULE, @@ -283,6 +297,7 @@ static void req_done(unsigned long data) unsigned long flags = 0; struct qcedev_async_req *new_req = NULL; int ret = 0; + int current_req_info = 0; spin_lock_irqsave(&podev->lock, flags); areq = podev->active_command; @@ -296,9 +311,11 @@ again: podev->active_command = new_req; new_req->err = 0; if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER) - ret = start_cipher_req(podev); + ret = start_cipher_req(podev, ¤t_req_info); + else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) + ret = start_offload_cipher_req(podev, ¤t_req_info); else - ret = start_sha_req(podev); + ret = start_sha_req(podev, ¤t_req_info); } spin_unlock_irqrestore(&podev->lock, flags); @@ -361,7 +378,8 @@ void qcedev_cipher_req_cb(void *cookie, unsigned char *icv, tasklet_schedule(&podev->done_tasklet); }; -static int start_cipher_req(struct qcedev_control *podev) +static int start_cipher_req(struct qcedev_control *podev, + int *current_req_info) { struct qcedev_async_req *qcedev_areq; struct 
qce_req creq; @@ -454,16 +472,125 @@ static int start_cipher_req(struct qcedev_control *podev) creq.qce_cb = qcedev_cipher_req_cb; creq.areq = (void *)&qcedev_areq->cipher_req; creq.flags = 0; + creq.offload_op = 0; ret = qce_ablk_cipher_req(podev->qce, &creq); + *current_req_info = creq.current_req_info; unsupported: - if (ret) - qcedev_areq->err = -ENXIO; - else - qcedev_areq->err = 0; + qcedev_areq->err = ret ? -ENXIO : 0 + return ret; }; -static int start_sha_req(struct qcedev_control *podev) +void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv, + unsigned char *iv, int ret) +{ + struct qcedev_cipher_req *areq; + struct qcedev_handle *handle; + struct qcedev_control *podev; + struct qcedev_async_req *qcedev_areq; + + areq = (struct qcedev_cipher_req *) cookie; + handle = (struct qcedev_handle *) areq->cookie; + podev = handle->cntl; + qcedev_areq = podev->active_command; + + if (iv) + memcpy(&qcedev_areq->offload_cipher_op_req.iv[0], iv, + qcedev_areq->offload_cipher_op_req.ivlen); + + tasklet_schedule(&podev->done_tasklet); +} + +static int start_offload_cipher_req(struct qcedev_control *podev, + int *current_req_info) +{ + struct qcedev_async_req *qcedev_areq; + struct qce_req creq; + u8 patt_sz = 0, proc_data_sz = 0; + int ret = 0; + + /* Start the command on the podev->active_command */ + qcedev_areq = podev->active_command; + qcedev_areq->cipher_req.cookie = qcedev_areq->handle; + + switch (qcedev_areq->offload_cipher_op_req.alg) { + case QCEDEV_ALG_AES: + creq.alg = CIPHER_ALG_AES; + break; + default: + return -EINVAL; + } + + switch (qcedev_areq->offload_cipher_op_req.mode) { + case QCEDEV_AES_MODE_CBC: + creq.mode = QCE_MODE_CBC; + break; + case QCEDEV_AES_MODE_CTR: + creq.mode = QCE_MODE_CTR; + break; + default: + return -EINVAL; + } + + if (qcedev_areq->offload_cipher_op_req.is_copy_op) { + creq.dir = QCE_ENCRYPT; + } else { + switch(qcedev_areq->offload_cipher_op_req.op) { + case QCEDEV_OFFLOAD_HLOS_HLOS: + case QCEDEV_OFFLOAD_HLOS_CPB: + creq.dir = QCE_DECRYPT; + break; + case QCEDEV_OFFLOAD_CPB_HLOS: + creq.dir = QCE_ENCRYPT; + break; + default: + return -EINVAL; + } + } + + creq.iv = &qcedev_areq->offload_cipher_op_req.iv[0]; + creq.ivsize = qcedev_areq->offload_cipher_op_req.ivlen; + creq.iv_ctr_size = qcedev_areq->offload_cipher_op_req.iv_ctr_size; + + creq.encklen = qcedev_areq->offload_cipher_op_req.encklen; + + /* OFFLOAD use cases use PIPE keys so no need to set keys */ + creq.flags = QCEDEV_CTX_USE_PIPE_KEY; + creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY; + creq.offload_op = (int)qcedev_areq->offload_cipher_op_req.op; + if (qcedev_areq->offload_cipher_op_req.is_copy_op) + creq.is_copy_op = true; + + creq.cryptlen = qcedev_areq->offload_cipher_op_req.data_len; + + creq.qce_cb = qcedev_offload_cipher_req_cb; + creq.areq = (void *)&qcedev_areq->cipher_req; + + patt_sz = qcedev_areq->offload_cipher_op_req.pattern_info.patt_sz; + proc_data_sz = + qcedev_areq->offload_cipher_op_req.pattern_info.proc_data_sz; + creq.is_pattern_valid = + qcedev_areq->offload_cipher_op_req.is_pattern_valid; + if (creq.is_pattern_valid) { + creq.pattern_info = 0x1; + if (patt_sz) + creq.pattern_info |= (patt_sz - 1) << 4; + if (proc_data_sz) + creq.pattern_info |= (proc_data_sz - 1) << 8; + creq.pattern_info |= + qcedev_areq->offload_cipher_op_req.pattern_info.patt_offset << 12; + } + creq.block_offset = qcedev_areq->offload_cipher_op_req.block_offset; + ret = qce_ablk_cipher_req(podev->qce, &creq); + + *current_req_info = creq.current_req_info; + qcedev_areq->err = ret ? 
-ENXIO : 0 + + return ret; +} + +static int start_sha_req(struct qcedev_control *podev, + int *current_req_info) { struct qcedev_async_req *qcedev_areq; struct qce_sha_req sreq; @@ -532,13 +659,37 @@ static int start_sha_req(struct qcedev_control *podev) ret = qce_process_sha_req(podev->qce, &sreq); - if (ret) - qcedev_areq->err = -ENXIO; - else - qcedev_areq->err = 0; + *current_req_info = sreq.current_req_info; + qcedev_areq->err = ret ? -ENXIO : 0 + return ret; }; +static void qcedev_check_crypto_status( + struct qcedev_async_req *qcedev_areq, void *handle, + bool print_err) +{ + unsigned int s1, s2, s3, s4, s5, s6; + + qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR; + qce_get_crypto_status(handle, &s1, &s2, &s3, &s4, &s5, &s6); + + if (print_err) { + pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, + s1, s2, s3, s4, s5, s6); + } + if ((s6 & QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK) || + (s3 & QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK)) { + pr_info("%s: crypto timer expired\n", __func__); + pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, + s1, s2, s3, s4, s5, s6); + qcedev_areq->offload_cipher_op_req.err = + QCEDEV_OFFLOAD_TIMER_ERROR; + } + + return; +} + static int submit_req(struct qcedev_async_req *qcedev_areq, struct qcedev_handle *handle) { @@ -546,18 +697,27 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, unsigned long flags = 0; int ret = 0; struct qcedev_stat *pstat; + int current_req_info = 0; + int wait = 0; + bool print_sts = false; qcedev_areq->err = 0; podev = handle->cntl; + qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); + if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) + return 0; + spin_lock_irqsave(&podev->lock, flags); if (podev->active_command == NULL) { podev->active_command = qcedev_areq; if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) - ret = start_cipher_req(podev); + ret = start_cipher_req(podev, ¤t_req_info); + else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) + ret = start_offload_cipher_req(podev, ¤t_req_info); else - ret = start_sha_req(podev); + ret = start_sha_req(podev, ¤t_req_info); } else { list_add_tail(&qcedev_areq->list, &podev->ready_commands); } @@ -568,11 +728,30 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, spin_unlock_irqrestore(&podev->lock, flags); if (ret == 0) - wait_for_completion(&qcedev_areq->complete); + wait = wait_for_completion_timeout(&qcedev_areq->complete, + msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME)); + + if (!wait) { + /* + * This means wait timed out, and the callback routine was not + * exercised. The callback sequence does some housekeeping which + * would be missed here, hence having a call to qce here to do + * that. 
+ */ + pr_err("%s: wait timed out, req info = %d\n", __func__, + current_req_info); + print_sts = true; + qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); + qce_manage_timeout(podev->qce, current_req_info); + } if (ret) qcedev_areq->err = -EIO; + qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); + if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) + return 0; + pstat = &_qcedev_stat; if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) { switch (qcedev_areq->cipher_op_req.op) { @@ -591,6 +770,8 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, default: break; } + } else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) { + //Do nothing } else { if (qcedev_areq->err) pstat->qcedev_sha_fail++; @@ -1417,6 +1598,72 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq, } +static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int i = 0; + int err = 0; + size_t byteoffset = 0; + size_t transfer_data_len = 0; + size_t pending_data_len = 0; + size_t max_data_xfer = MAX_CEHW_REQ_TRANSFER_SIZE - byteoffset; + uint8_t *user_src = NULL; + uint8_t *user_dst = NULL; + struct scatterlist sg_src; + struct scatterlist sg_dst; + + if (areq->offload_cipher_op_req.mode == QCEDEV_AES_MODE_CTR) + byteoffset = areq->offload_cipher_op_req.byteoffset; + + /* + * areq has two components: + * a) Request that comes from userspace i.e. offload_cipher_op_req + * b) Request that QCE understands - skcipher i.e. cipher_req.creq + * skcipher has sglist pointers src and dest that would carry + * data to/from CE. + */ + areq->cipher_req.creq.src = &sg_src; + areq->cipher_req.creq.dst = &sg_dst; + sg_init_table(&sg_src, 1); + sg_init_table(&sg_dst, 1); + + for (i = 0; i < areq->offload_cipher_op_req.entries; i++) { + transfer_data_len = 0; + pending_data_len = areq->offload_cipher_op_req.vbuf.src[i].len; + user_src = areq->offload_cipher_op_req.vbuf.src[i].vaddr; + user_src += byteoffset; + + user_dst = areq->offload_cipher_op_req.vbuf.dst[i].vaddr; + user_dst += byteoffset; + + areq->cipher_req.creq.iv = areq->offload_cipher_op_req.iv; + + while (pending_data_len) { + transfer_data_len = min(max_data_xfer, + pending_data_len); + sg_src.dma_address = (dma_addr_t)user_src; + sg_dst.dma_address = (dma_addr_t)user_dst; + areq->cipher_req.creq.cryptlen = transfer_data_len; + + sg_src.length = transfer_data_len; + sg_dst.length = transfer_data_len; + + err = submit_req(areq, handle); + if (err) { + pr_err("%s: Error processing req, err = %d\n", + __func__, err); + goto exit; + } + /* update data len to be processed */ + pending_data_len -= transfer_data_len; + user_src += transfer_data_len; + user_dst += transfer_data_len; + } + } +exit: + return err; +} + static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req, struct qcedev_control *podev) { @@ -1663,6 +1910,138 @@ sha_error: return -EINVAL; } +static int qcedev_check_offload_cipher_key(struct qcedev_offload_cipher_op_req *req, + struct qcedev_control *podev) +{ + if (req->encklen == 0) + return -EINVAL; + + /* AES-192 is not a valid option for OFFLOAD use case */ + if ((req->encklen != QCEDEV_AES_KEY_128) && + (req->encklen != QCEDEV_AES_KEY_256)) { + pr_err("%s: unsupported key size %d\n", + __func__, req->encklen); + goto error; + } + + return 0; +error: + return -EINVAL; +} + +static int qcedev_check_offload_cipher_params(struct qcedev_offload_cipher_op_req *req, + struct qcedev_control *podev) +{ + uint32_t total = 
0; + int i = 0; + + if ((req->entries == 0) || (req->data_len == 0) || + (req->entries > QCEDEV_MAX_BUFFERS)) { + pr_err("%s: Invalid cipher length/entries\n", __func__); + goto error; + } + + if ((req->alg != QCEDEV_ALG_AES) || + (req->mode > QCEDEV_AES_MODE_CTR)) { + pr_err("%s: Invalid algorithm %d\n", __func__, + (uint32_t)req->alg); + goto error; + } + + if (qcedev_check_offload_cipher_key(req, podev)) + goto error; + + if (req->block_offset >= AES_CE_BLOCK_SIZE) + goto error; + + /* if using a byteoffset, make sure it is CTR mode using vbuf */ + if (req->byteoffset) { + if (req->mode != QCEDEV_AES_MODE_CTR) { + pr_err("%s: Operation on byte offset not supported\n", + __func__); + goto error; + } + if (req->byteoffset >= AES_CE_BLOCK_SIZE) { + pr_err("%s: Invalid byte offset\n", __func__); + goto error; + } + total = req->byteoffset; + for (i = 0; i < req->entries; i++) { + if (total > U32_MAX - req->vbuf.src[i].len) { + pr_err("%s:Int overflow on total src len\n", + __func__); + goto error; + } + total += req->vbuf.src[i].len; + } + } + + if (req->data_len < req->byteoffset) { + pr_err("%s: req data length %u is less than byteoffset %u\n", + __func__, req->data_len, req->byteoffset); + goto error; + } + + /* Ensure IV size */ + if (req->ivlen > QCEDEV_MAX_IV_SIZE) { + pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen); + goto error; + } + + /* Ensure Key size */ + if (req->encklen > QCEDEV_MAX_KEY_SIZE) { + pr_err("%s: Klen is not correct: %u\n", __func__, + req->encklen); + goto error; + } + + /* Check for sum of all dst length is equal to data_len */ + for (i = 0, total = 0; i < req->entries; i++) { + if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) { + pr_err("%s: NULL req dst vbuf[%d] with length %d\n", + __func__, i, req->vbuf.dst[i].len); + goto error; + } + if (req->vbuf.dst[i].len >= U32_MAX - total) { + pr_err("%s: Int overflow on total req dst vbuf len\n", + __func__); + goto error; + } + total += req->vbuf.dst[i].len; + } + + if (total != req->data_len) { + pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n", + __func__, i, total, req->data_len); + goto error; + } + + /* Check for sum of all src length is equal to data_len */ + for (i = 0, total = 0; i < req->entries; i++) { + if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) { + pr_err("%s: NULL req src vbuf[%d] with length %d\n", + __func__, i, req->vbuf.src[i].len); + goto error; + } + if (req->vbuf.src[i].len > U32_MAX - total) { + pr_err("%s: Int overflow on total req src vbuf len\n", + __func__); + goto error; + } + total += req->vbuf.src[i].len; + } + + if (total != req->data_len) { + pr_err("%s: Total src(%d) buf size != data_len (%d)\n", + __func__, total, req->data_len); + goto error; + } + + return 0; +error: + return -EINVAL; +} + long qcedev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -1727,6 +2106,33 @@ long qcedev_ioctl(struct file *file, } break; + case QCEDEV_IOCTL_OFFLOAD_OP_REQ: + if (copy_from_user(&qcedev_areq->offload_cipher_op_req, + (void __user *)arg, + sizeof(struct qcedev_offload_cipher_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + + qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER; + if (qcedev_check_offload_cipher_params( + &qcedev_areq->offload_cipher_op_req, podev)) { + err = -EINVAL; + goto exit_free_qcedev_areq; + } + + err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle); + if (err) + goto exit_free_qcedev_areq; + + if (copy_to_user((void __user *)arg, + &qcedev_areq->offload_cipher_op_req, + sizeof(struct 
qcedev_offload_cipher_op_req))) { + err = -EFAULT; + goto exit_free_qcedev_areq; + } + break; + case QCEDEV_IOCTL_SHA_INIT_REQ: { struct scatterlist sg_src; @@ -1944,8 +2350,8 @@ long qcedev_ioctl(struct file *file, goto exit_free_qcedev_areq; } map_buf.buf_vaddr[i] = vaddr; - pr_info("%s: info: vaddr = %llx\n", - __func__, vaddr); + pr_info("%s: info: vaddr = %llx\n, fd = %d", + __func__, vaddr, map_buf.fd[i]); } if (copy_to_user((void __user *)arg, &map_buf, diff --git a/crypto-qti/qcedevi.h b/crypto-qti/qcedevi.h index 41810784d9..ca0208a736 100644 --- a/crypto-qti/qcedevi.h +++ b/crypto-qti/qcedevi.h @@ -16,12 +16,13 @@ #include "qce.h" #include "qcedev_smmu.h" -#define CACHE_LINE_SIZE 32 +#define CACHE_LINE_SIZE 64 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE enum qcedev_crypto_oper_type { QCEDEV_CRYPTO_OPER_CIPHER = 0, QCEDEV_CRYPTO_OPER_SHA = 1, + QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER = 2, QCEDEV_CRYPTO_OPER_LAST }; @@ -56,6 +57,7 @@ struct qcedev_async_req { union { struct qcedev_cipher_op_req cipher_op_req; struct qcedev_sha_op_req sha_op_req; + struct qcedev_offload_cipher_op_req offload_cipher_op_req; }; union { diff --git a/crypto-qti/qcryptohw_50.h b/crypto-qti/qcryptohw_50.h index 16bb7d5ede..253cfd1654 100644 --- a/crypto-qti/qcryptohw_50.h +++ b/crypto-qti/qcryptohw_50.h @@ -26,6 +26,11 @@ #define CRYPTO_STATUS_REG 0x1A100 #define CRYPTO_STATUS2_REG 0x1A104 +#define CRYPTO_STATUS3_REG 0x1A11C +#define CRYPTO_STATUS4_REG 0x1A124 +#define CRYPTO_STATUS5_REG 0x1A128 +#define CRYPTO_STATUS6_REG 0x1A13C + #define CRYPTO_ENGINES_AVAIL 0x1A108 #define CRYPTO_FIFO_SIZES_REG 0x1A10C @@ -37,6 +42,8 @@ #define CRYPTO_ENCR_SEG_CFG_REG 0x1A200 #define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204 #define CRYPTO_ENCR_SEG_START_REG 0x1A208 +#define CRYPTO_DATA_PATT_PROC_CFG_REG 0x1A500 +#define CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG 0x1A504 #define CRYPTO_ENCR_KEY0_REG 0x1D000 #define CRYPTO_ENCR_KEY1_REG 0x1D004 diff --git a/linux/qcedev.h b/linux/qcedev.h index 6968e92c4b..b797cd4fae 100644 --- a/linux/qcedev.h +++ b/linux/qcedev.h @@ -41,6 +41,31 @@ enum qcedev_oper_enum { QCEDEV_OPER_LAST }; +/** + *qcedev_offload_oper_enum: Offload operation types (uses pipe keys) + * @QCEDEV_OFFLOAD_HLOS_HLOS: Non-secure to non-secure (eg. audio dec). + * @QCEDEV_OFFLOAD_HLOS_CPB: Non-secure to secure (eg. video dec). + * @QCEDEV_OFFLOAD_CPB_HLOS: Secure to non-secure (eg. hdcp video enc). + */ +enum qcedev_offload_oper_enum { + QCEDEV_OFFLOAD_HLOS_HLOS = 1, + QCEDEV_OFFLOAD_HLOS_CPB = 2, + QCEDEV_OFFLOAD_CPB_HLOS = 3, + QCEDEV_OFFLOAD_OPER_LAST +}; + +/** + *qcedev_offload_err_enum: Offload error conditions + * @QCEDEV_OFFLOAD_NO_ERROR: Successful crypto operation. + * @QCEDEV_OFFLOAD_GENERIC_ERROR: Generic error in crypto status. + * @QCEDEV_OFFLOAD_TIMER_ERROR: Pipe key timer errors in crypto status. + */ +enum qcedev_offload_err_enum { + QCEDEV_OFFLOAD_NO_ERROR = 0, + QCEDEV_OFFLOAD_GENERIC_ERROR = 1, + QCEDEV_OFFLOAD_TIMER_ERROR = 2 +}; + /** *qcedev_oper_enum: Cipher algorithm types * @QCEDEV_ALG_DES: DES @@ -223,6 +248,72 @@ struct qcedev_sha_op_req { enum qcedev_sha_alg_enum alg; }; +/** + * struct pattern_info - Holds pattern information for pattern-based + * decryption/encryption for AES ECB, counter, and CBC modes. + * @patt_sz (IN): Total number of blocks. + * @proc_data_sz (IN): Number of blocks to be processed. + * @patt_offset (IN): Start of the segment. 
+ */ +struct pattern_info { + __u8 patt_sz; + __u8 proc_data_sz; + __u8 patt_offset; +}; + +/** + * struct qcedev_offload_cipher_op_req - Holds the offload request information + * @vbuf (IN/OUT): Stores Source and destination Buffer information. + * Refer to struct qcedev_vbuf_info. + * @entries (IN): Number of entries to be processed as part of request. + * @data_len (IN): Total Length of input/src and output/dst in bytes + * @in_place_op (IN): Indicates whether the operation is inplace where + * source == destination. + * @encklen (IN): Length of the encryption key(set to 128 bits/16 + * bytes in the driver). + * @iv (IN/OUT): Initialisation vector data + * This is updated by the driver, incremented by + * number of blocks encrypted/decrypted. + * @ivlen (IN): Length of the IV. + * @iv_ctr_size (IN): IV counter increment mask size. + * Driver sets the mask value based on this size. + * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set + * for AES-128 CTR mode only). + * @block_offset (IN): Offset in the block that needs a skip of encrypt/ + * decrypt. + * @pattern_valid (IN): Indicates the request contains a valid pattern. + * @pattern_info (IN): The pattern to be used for the offload request. + * @is_copy_op (IN): Offload operations sometimes requires a copy between + * secure and non-secure buffers without any encrypt/ + * decrypt operations. + * @alg (IN): Type of ciphering algorithm: AES/DES/3DES. + * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR. + * Applicable when using AES algorithm only. + * @op (IN): Type of operation. + * Refer to qcedev_offload_oper_enum. + * @err (OUT): Error in crypto status. + * Refer to qcedev_offload_err_enum. + */ +struct qcedev_offload_cipher_op_req { + struct qcedev_vbuf_info vbuf; + __u32 entries; + __u32 data_len; + __u32 in_place_op; + __u32 encklen; + __u8 iv[QCEDEV_MAX_IV_SIZE]; + __u32 ivlen; + __u32 iv_ctr_size; + __u32 byteoffset; + __u8 block_offset; + __u8 is_pattern_valid; + __u8 is_copy_op; + struct pattern_info pattern_info; + enum qcedev_cipher_alg_enum alg; + enum qcedev_cipher_mode_enum mode; + enum qcedev_offload_oper_enum op; + enum qcedev_offload_err_enum err; +}; + /** * struct qfips_verify_t - Holds data for FIPS Integrity test * @kernel_size (IN): Size of kernel Image @@ -286,4 +377,6 @@ struct file; _IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req) #define QCEDEV_IOCTL_UNMAP_BUF_REQ \ _IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req) +#define QCEDEV_IOCTL_OFFLOAD_OP_REQ \ + _IOWR(QCEDEV_IOC_MAGIC, 12, struct qcedev_offload_cipher_op_req) #endif /* _QCEDEV__H */ diff --git a/linux/qcota.h b/linux/qcota.h index 1a1682e5e5..f803c3dc53 100644 --- a/linux/qcota.h +++ b/linux/qcota.h @@ -55,6 +55,7 @@ struct qce_f8_req { __u8 ckey[OTA_KEY_SIZE]; enum qce_ota_dir_enum direction; enum qce_ota_algo_enum algorithm; + int current_req_info; }; /** @@ -202,6 +203,7 @@ struct qce_f9_req { enum qce_ota_dir_enum direction; __u8 ikey[OTA_KEY_SIZE]; enum qce_ota_algo_enum algorithm; + int current_req_info; }; #define QCOTA_IOC_MAGIC 0x85 From 48af4216290cdecbbe2898d64fc333f82c374db5 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Wed, 13 Apr 2022 23:37:50 -0700 Subject: [PATCH 021/202] qcedev compilation fix Change-Id: Ib1c531b002faa1bcdc5ab306f629bb1702787a46 --- crypto-qti/qcedev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 660b054df1..81c14da28a 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -476,7 
+476,7 @@ static int start_cipher_req(struct qcedev_control *podev, ret = qce_ablk_cipher_req(podev->qce, &creq); *current_req_info = creq.current_req_info; unsupported: - qcedev_areq->err = ret ? -ENXIO : 0 + qcedev_areq->err = ret ? -ENXIO : 0; return ret; }; @@ -584,7 +584,7 @@ static int start_offload_cipher_req(struct qcedev_control *podev, ret = qce_ablk_cipher_req(podev->qce, &creq); *current_req_info = creq.current_req_info; - qcedev_areq->err = ret ? -ENXIO : 0 + qcedev_areq->err = ret ? -ENXIO : 0; return ret; } @@ -660,7 +660,7 @@ static int start_sha_req(struct qcedev_control *podev, ret = qce_process_sha_req(podev->qce, &sreq); *current_req_info = sreq.current_req_info; - qcedev_areq->err = ret ? -ENXIO : 0 + qcedev_areq->err = ret ? -ENXIO : 0; return ret; }; From d1d7d597d61b9606b79dd8df5e10c31a1a903ac6 Mon Sep 17 00:00:00 2001 From: Nitin LNU Date: Thu, 7 Apr 2022 15:50:44 +0530 Subject: [PATCH 022/202] smcinvoke: take lock again in free_mem_obj_locked instead of premature unlock Return with the lock held from the free_mem_obj_locked API. Signed-off-by: Nitin LNU --- smcinvoke/smcinvoke.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/smcinvoke/smcinvoke.c index 75b2937c7a..95798de9a9 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -389,6 +389,8 @@ static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) ret, dmabuf_to_free); else dma_buf_put(dmabuf_to_free); + + mutex_lock(&g_smcinvoke_lock); } static void del_mem_regn_obj_locked(struct kref *kref) From 64e85a3b4a8013c5e9b6835bdacee0dfc00fc305 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 14 Apr 2022 19:34:42 -0700 Subject: [PATCH 023/202] qcedev: add gpce key pause error conditions Add support in the qcedev driver to detect KEY_PAUSE error conditions and report accordingly to userspace.
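The hunks below add distinct key-timer-expired and key-pause error codes, plus the Status3/Status6 masks used to detect them, and surface the result through the err field of struct qcedev_offload_cipher_op_req. As a hedged illustration of how a userspace caller of the QCEDEV_IOCTL_OFFLOAD_OP_REQ ioctl might consume that field (the include path, the re-key policy, and the helper name are assumptions, not something this series defines):

/* Hypothetical userspace consumer of the offload ioctl's err field. */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>	/* installed UAPI header path assumed */

static int run_offload_op(int fd, struct qcedev_offload_cipher_op_req *req)
{
	if (ioctl(fd, QCEDEV_IOCTL_OFFLOAD_OP_REQ, req) < 0)
		return -errno;	/* ioctl-level failure */

	switch (req->err) {
	case QCEDEV_OFFLOAD_NO_ERROR:
		return 0;
	case QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR:
	case QCEDEV_OFFLOAD_KEY_PAUSE_ERROR:
		/* Assumed policy: have the pipe key re-programmed by its
		 * owner, then retry this request once. */
		fprintf(stderr, "pipe key expired/paused, re-key needed\n");
		return -EAGAIN;
	default:
		fprintf(stderr, "offload op error %d\n", req->err);
		return -EIO;
	}
}

Here fd is assumed to be an already-open qcedev file descriptor; the checks mirror the decode order in qcedev_check_crypto_status() below (timer expiry, then key pause, then generic error).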
Change-Id: Id7bbde02ca4d08917bcf21d6f62e21f3c71abc7b --- crypto-qti/qce50.c | 4 ++-- crypto-qti/qcedev.c | 46 +++++++++++++++++++++++++++++++++++++++------ linux/qcedev.h | 6 ++++-- 3 files changed, 46 insertions(+), 10 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index b303d239ea..81d7d2c1be 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -85,7 +85,7 @@ static LIST_HEAD(qce50_bam_list); #define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec)) #define AES_CTR_IV_CTR_SIZE 64 -#define EXPECTED_STATUS1_REG_VAL 0x2000006 +#define STATUS1_ERR_INTR_MASK 0x10 enum qce_owner { QCE_OWNER_NONE = 0, @@ -228,7 +228,7 @@ void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2, #ifdef QCE_DEBUG dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6); #else - if (*s1 != EXPECTED_STATUS1_REG_VAL) + if (*s1 & STATUS1_ERR_INTR_MASK) dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6); #endif return; diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 81c14da28a..34c4238c7e 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -57,8 +57,16 @@ static uint8_t _std_init_vector_sha256_uint8[] = { #define QCEDEV_CTX_USE_HW_KEY 0x00000001 #define QCEDEV_CTX_USE_PIPE_KEY 0x00000002 -#define QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK 0x000000FF -#define QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK 0x00000003 +// Key timer expiry for pipes 1-15 (Status3) +#define PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK 0x000000FF +// Key timer expiry for pipes 16-19 (Status6) +#define PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK 0x00000003 +// Key pause for pipes 1-15 (Status3) +#define PIPE_KEY_PAUSE_STATUS3_MASK 0xFF0000 +// Key pause for pipes 16-19 (Status6) +#define PIPE_KEY_PAUSE_STATUS6_MASK 0x30000 + +#define QCEDEV_STATUS1_ERR_INTR_MASK 0x10 static DEFINE_MUTEX(send_cmd_lock); static DEFINE_MUTEX(qcedev_sent_bw_req); @@ -678,16 +686,39 @@ static void qcedev_check_crypto_status( pr_err("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, s1, s2, s3, s4, s5, s6); } - if ((s6 & QCEDEV_PIPE_KEY_TIMER2_EXPIRED_VEC_MASK) || - (s3 & QCEDEV_PIPE_KEY_TIMER1_EXPIRED_VEC_MASK)) { + + // Check for key timer expiry + if ((s6 & PIPE_KEY_TIMER_EXPIRED_STATUS6_MASK) || + (s3 & PIPE_KEY_TIMER_EXPIRED_STATUS3_MASK)) { pr_info("%s: crypto timer expired\n", __func__); pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, s1, s2, s3, s4, s5, s6); qcedev_areq->offload_cipher_op_req.err = - QCEDEV_OFFLOAD_TIMER_ERROR; + QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR; + return; + } + + // Check for key pause + if ((s6 & PIPE_KEY_PAUSE_STATUS6_MASK) || + (s3 & PIPE_KEY_PAUSE_STATUS3_MASK)) { + pr_info("%s: crypto key paused\n", __func__); + pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, + s1, s2, s3, s4, s5, s6); + qcedev_areq->offload_cipher_op_req.err = + QCEDEV_OFFLOAD_KEY_PAUSE_ERROR; + return; + } + + // Check for generic error + if (s1 & QCEDEV_STATUS1_ERR_INTR_MASK) { + pr_err("%s: generic crypto error\n", __func__); + pr_info("%s: sts = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __func__, + s1, s2, s3, s4, s5, s6); + qcedev_areq->offload_cipher_op_req.err = + QCEDEV_OFFLOAD_GENERIC_ERROR; + return; } - return; } static int submit_req(struct qcedev_async_req *qcedev_areq, @@ -743,6 +774,9 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, print_sts = true; qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); qce_manage_timeout(podev->qce, current_req_info); + if (qcedev_areq->offload_cipher_op_req.err != + QCEDEV_OFFLOAD_NO_ERROR) + return 0; } if 
(ret) diff --git a/linux/qcedev.h index b797cd4fae..8f362e9464 100644 --- a/linux/qcedev.h +++ b/linux/qcedev.h @@ -58,12 +58,14 @@ enum qcedev_offload_oper_enum { *qcedev_offload_err_enum: Offload error conditions * @QCEDEV_OFFLOAD_NO_ERROR: Successful crypto operation. * @QCEDEV_OFFLOAD_GENERIC_ERROR: Generic error in crypto status. - * @QCEDEV_OFFLOAD_TIMER_ERROR: Pipe key timer errors in crypto status. + * @QCEDEV_OFFLOAD_TIMER_EXPIRED_ERROR: Pipe key timer expired. + * @QCEDEV_OFFLOAD_KEY_PAUSE_ERROR: Pipe key pause (means GPCE is paused). */ enum qcedev_offload_err_enum { QCEDEV_OFFLOAD_NO_ERROR = 0, QCEDEV_OFFLOAD_GENERIC_ERROR = 1, - QCEDEV_OFFLOAD_TIMER_ERROR = 2 + QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR = 2, + QCEDEV_OFFLOAD_KEY_PAUSE_ERROR = 3 }; /** From 0311094bc0de2a002518ca683705201cd0aa5a58 Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Mon, 2 May 2022 13:40:22 -0700 Subject: [PATCH 024/202] smcinvoke: Conditionally disable DLKM Disable DLKMs for a specific target, as on that target these DLKMs come as kernel modules. Change-Id: Ie6a53add3e6ebc17e662f3b564cade7e7f284020 --- Android.mk | 5 +++++ Kbuild | 2 ++ 2 files changed, 7 insertions(+) diff --git a/Android.mk b/Android.mk index f50ab6fd44..e11b31e30a 100644 --- a/Android.mk +++ b/Android.mk @@ -1,5 +1,9 @@ # Android makefile for audio kernel modules +#Target based of taro does not need these DLKM's as they are present as kernel drivers +#But the project is mapped for DEV SP due to dependency on smcinvoke_kernel_headers +#Hence preventing the DLKM's to be part of the taro based DEV SP +ifneq ($(TARGET_BOARD_PLATFORM), taro) LOCAL_PATH := $(call my-dir) DLKM_DIR := $(TOP)/device/qcom/common/dlkm @@ -97,3 +101,4 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################### +endif diff --git a/Kbuild b/Kbuild index 7d70810490..bb0ab0db40 100644 --- a/Kbuild +++ b/Kbuild @@ -1,3 +1,4 @@ +ifneq ($(TARGET_BOARD_PLATFORM ), taro) ifeq ($(CONFIG_ARCH_QTI_VM), y) include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_tvm.conf else @@ -28,3 +29,4 @@ hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o qrng_dlkm-objs := qrng/msm_rng.o +endif From ac5ae74540e7dc8456dafc7ddc29a1594f406e07 Mon Sep 17 00:00:00 2001 From: Indranil Pradhan Date: Thu, 28 Apr 2022 17:20:32 +0530 Subject: [PATCH 025/202] securemsm-kernel: Enable qseecom module for auto platform Add support for qseecom as a DLKM; basic qseecom test cases pass, except listener services.
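Moving qseecom into a DLKM does not change the in-kernel client contract from linux/misc/qseecom_kernel.h, part of which appears in the diff below. The sketch that follows is a hedged illustration of a kernel client issuing one command to an already-loaded trusted app; the handle is assumed to come from the app-load API that this excerpt does not show, the sbuf member name is taken from the full header, and the 64-byte request/response layout is a placeholder.

/* Hedged sketch of an in-kernel qseecom client (buffer layout is TA-specific). */
#include <linux/errno.h>
#include "linux/misc/qseecom_kernel.h"	/* include path assumption; see the LINUXINCLUDE paths in this patch */

static int example_send_ta_command(struct qseecom_handle *handle)
{
	void *req, *rsp;
	int rc;

	if (!handle)
		return -EINVAL;

	/* Request and response are carved out of the handle's shared buffer. */
	req = handle->sbuf;
	rsp = handle->sbuf + 64;

	rc = qseecom_set_bandwidth(handle, true);	/* vote bus/clocks high */
	if (rc)
		return rc;

	rc = qseecom_send_command(handle, req, 64, rsp, 64);

	qseecom_set_bandwidth(handle, false);		/* drop the vote */
	return rc;
}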
Change-Id: I09ad0433f757d00f0902df937714132c6dff5030 --- Android.mk | 14 + Kbuild | 14 +- config/sec-kernel_auto_defconfig.conf | 6 + linux/misc/qseecom_kernel.h | 2 +- qseecom/ice.h | 145 + qseecom/qseecom.c | 9810 +++++++++++++++++++++++++ securemsm_kernel_product_board.mk | 6 +- securemsm_kernel_vendor_board.mk | 8 +- 8 files changed, 9996 insertions(+), 9 deletions(-) create mode 100644 config/sec-kernel_auto_defconfig.conf create mode 100644 qseecom/ice.h create mode 100644 qseecom/qseecom.c diff --git a/Android.mk b/Android.mk index e11b31e30a..69f390fe78 100644 --- a/Android.mk +++ b/Android.mk @@ -30,6 +30,7 @@ LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################### ################################################### +ifneq ($(TARGET_BOARD_AUTO),true) #$(error $(SSG_SRC_FILES)) include $(CLEAR_VARS) #LOCAL_SRC_FILES := $(SSG_SRC_FILES) @@ -40,6 +41,7 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk +endif #TARGET_BOARD_AUTO ################################################### ################################################### include $(CLEAR_VARS) @@ -101,4 +103,16 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################### +################################################### +ifeq ($(TARGET_BOARD_AUTO),true) +include $(CLEAR_VARS) +LOCAL_SRC_FILES := $(SSG_SRC_FILES) +LOCAL_MODULE := qseecom_dlkm.ko +LOCAL_MODULE_KBUILD_NAME := qseecom_dlkm.ko +LOCAL_MODULE_TAGS := optional +LOCAL_MODULE_DEBUG_ENABLE := true +LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) +include $(DLKM_DIR)/Build_external_kernelmodule.mk +endif #TARGET_BOARD_AUTO +################################################### endif diff --git a/Kbuild b/Kbuild index bb0ab0db40..8da36daae8 100644 --- a/Kbuild +++ b/Kbuild @@ -1,16 +1,20 @@ ifneq ($(TARGET_BOARD_PLATFORM ), taro) +LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ + -I$(SSG_MODULE_ROOT)/linux/ \ + -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h +ifeq ($(CONFIG_ARCH_SA8155),y) +include $(SSG_MODULE_ROOT)/config/sec-kernel_auto_defconfig.conf +obj-$(CONFIG_QSEECOM) += qseecom_dlkm.o +qseecom_dlkm-objs := qseecom/qseecom.o +else ifeq ($(CONFIG_ARCH_QTI_VM), y) include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_tvm.conf else include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf endif - -LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ - -I$(SSG_MODULE_ROOT)/linux/ \ - -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h - obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o +endif obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o tz_log_dlkm-objs := tz_log/tz_log.o diff --git a/config/sec-kernel_auto_defconfig.conf b/config/sec-kernel_auto_defconfig.conf new file mode 100644 index 0000000000..d0aefdd092 --- /dev/null +++ b/config/sec-kernel_auto_defconfig.conf @@ -0,0 +1,6 @@ +export CONFIG_QTI_TZ_LOG=m +export CONFIG_CRYPTO_DEV_QCEDEV=m +export CONFIG_CRYPTO_DEV_QCRYPTO=m +export CONFIG_HDCP_QSEECOM=m +export CONFIG_HW_RANDOM_MSM_LEGACY=m +export CONFIG_QSEECOM=m diff --git a/linux/misc/qseecom_kernel.h b/linux/misc/qseecom_kernel.h index 2c0ffeca76..f67ca47c0c 100644 --- a/linux/misc/qseecom_kernel.h +++ b/linux/misc/qseecom_kernel.h @@ -33,7 +33,7 @@ int 
qseecom_send_command(struct qseecom_handle *handle, void *send_buf, uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len); int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high); -#if IS_ENABLED(CONFIG_QSEECOM) +#if IS_ENABLED(CONFIG_QSEECOM) || IS_ENABLED(CONFIG_ARCH_SA8155) int qseecom_process_listener_from_smcinvoke(uint32_t *result, u64 *response_type, unsigned int *data); #else diff --git a/qseecom/ice.h b/qseecom/ice.h new file mode 100644 index 0000000000..037bf1486e --- /dev/null +++ b/qseecom/ice.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_ +#define _QCOM_INLINE_CRYPTO_ENGINE_H_ + +#include +#include + +struct request; + +enum ice_cryto_algo_mode { + ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0, + ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3, +}; + +enum ice_crpto_key_size { + ICE_CRYPTO_KEY_SIZE_128 = 0x0, + ICE_CRYPTO_KEY_SIZE_256 = 0x2, +}; + +enum ice_crpto_key_mode { + ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0, + ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1, + ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2, + ICE_CRYPTO_USE_LUT_SW_KEY = 0x3 +}; + +#define QCOM_ICE_TYPE_NAME_LEN 8 + +typedef void (*ice_error_cb)(void *, u32 error); + +struct qcom_ice_bus_vote { + uint32_t client_handle; + uint32_t curr_vote; + int min_bw_vote; + int max_bw_vote; + int saved_vote; + bool is_max_bw_needed; + struct device_attribute max_bus_bw; +}; + +/* + * ICE HW device structure. + */ +struct ice_device { + struct list_head list; + struct device *pdev; + struct cdev cdev; + dev_t device_no; + struct class *driver_class; + void __iomem *mmio; + struct resource *res; + int irq; + bool is_ice_enabled; + bool is_ice_disable_fuse_blown; + ice_error_cb error_cb; + void *host_controller_data; /* UFS/EMMC/other? 
*/ + struct list_head clk_list_head; + u32 ice_hw_version; + bool is_ice_clk_available; + char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN]; + struct regulator *reg; + bool is_regulator_available; + struct qcom_ice_bus_vote bus_vote; + ktime_t ice_reset_start_time; + ktime_t ice_reset_complete_time; + void *key_table; +}; + +struct ice_crypto_setting { + enum ice_crpto_key_size key_size; + enum ice_cryto_algo_mode algo_mode; + enum ice_crpto_key_mode key_mode; + short key_index; + +}; + +struct ice_data_setting { + struct ice_crypto_setting crypto_data; + bool sw_forced_context_switch; + bool decr_bypass; + bool encr_bypass; +}; + +/* MSM ICE Crypto Data Unit of target DUN of Transfer Request */ +enum ice_crypto_data_unit { + ICE_CRYPTO_DATA_UNIT_512_B = 0, + ICE_CRYPTO_DATA_UNIT_1_KB = 1, + ICE_CRYPTO_DATA_UNIT_2_KB = 2, + ICE_CRYPTO_DATA_UNIT_4_KB = 3, + ICE_CRYPTO_DATA_UNIT_8_KB = 4, + ICE_CRYPTO_DATA_UNIT_16_KB = 5, + ICE_CRYPTO_DATA_UNIT_32_KB = 6, + ICE_CRYPTO_DATA_UNIT_64_KB = 7, +}; + +struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node); +struct platform_device *qcom_ice_get_pdevice(struct device_node *node); + +#if IS_ENABLED(CONFIG_CYRPTO_DEV_QCOM_ICE) +int enable_ice_setup(struct ice_device *ice_dev); +int disable_ice_setup(struct ice_device *ice_dev); +int qcom_ice_setup_ice_hw(const char *storage_type, int enable); +void qcom_ice_set_fde_flag(int flag); +struct list_head *get_ice_dev_list(void); +#else +static inline int enable_ice_setup(struct ice_device *ice_dev) +{ + return 0; +} +static inline int disable_ice_setup(struct ice_device *ice_dev) +{ + return 0; +} +static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable) +{ + return 0; +} +static inline void qcom_ice_set_fde_flag(int flag) {} +static inline struct list_head *get_ice_dev_list(void) +{ + return NULL; +} +#endif + +struct qcom_ice_variant_ops { + const char *name; + int (*init)(struct platform_device *device_init, void *init_data, + ice_error_cb err); + int (*reset)(struct platform_device *device_reset); + int (*resume)(struct platform_device *device_resume); + int (*suspend)(struct platform_device *device_suspend); + int (*config_start)(struct platform_device *device_start, + struct request *req, struct ice_data_setting *setting, + bool start); + int (*config_end)(struct platform_device *pdev, + struct request *req); + int (*status)(struct platform_device *device_status); + void (*debug)(struct platform_device *device_debug); +}; + +#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */ diff --git a/qseecom/qseecom.c b/qseecom/qseecom.c new file mode 100644 index 0000000000..ffa14cdcc3 --- /dev/null +++ b/qseecom/qseecom.c @@ -0,0 +1,9810 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * QTI Secure Execution Environment Communicator (QSEECOM) driver + * + * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ice.h" + +#define QSEECOM_DEV "qseecom" +#define QSEOS_VERSION_14 0x14 +#define QSEEE_VERSION_00 0x400000 +#define QSEE_VERSION_01 0x401000 +#define QSEE_VERSION_02 0x402000 +#define QSEE_VERSION_03 0x403000 +#define QSEE_VERSION_04 0x404000 +#define QSEE_VERSION_05 0x405000 +#define QSEE_VERSION_20 0x800000 +#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */ + +#define QSEE_CE_CLK_100MHZ 100000000 +#define CE_CLK_DIV 1000000 + +#define QSEECOM_MAX_SG_ENTRY 4096 +#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \ + (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT) + +#define QSEECOM_INVALID_KEY_ID 0xff + +/* Save partition image hash for authentication check */ +#define SCM_SAVE_PARTITION_HASH_ID 0x01 + +/* Check if enterprise security is activate */ +#define SCM_IS_ACTIVATED_ID 0x02 + +/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */ +#define SCM_MDTP_CIPHER_DIP 0x01 + +/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */ +#define MAX_DIP 0x20000 + +#define RPMB_SERVICE 0x2000 +#define SSD_SERVICE 0x3000 + +#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000 +#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000 +#define TWO 2 +#define QSEECOM_UFS_ICE_CE_NUM 10 +#define QSEECOM_SDCC_ICE_CE_NUM 20 +#define QSEECOM_ICE_FDE_KEY_INDEX 0 + +#define PHY_ADDR_4G (1ULL<<32) + +#define QSEECOM_STATE_NOT_READY 0 +#define QSEECOM_STATE_SUSPEND 1 +#define QSEECOM_STATE_READY 2 +#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2 + +/* + * default ce info unit to 0 for + * services which + * support only single instance. + * Most of services are in this category. + */ +#define DEFAULT_CE_INFO_UNIT 0 +#define DEFAULT_NUM_CE_INFO_UNIT 1 + +#define FDE_FLAG_POS 4 +#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS) + +enum qseecom_clk_definitions { + CLK_DFAB = 0, + CLK_SFPB, +}; + +enum qseecom_ice_key_size_type { + QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE = + (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK), + QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE = + (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK), + QSEE_ICE_FDE_KEY_SIZE_UNDEFINED = + (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK), +}; + +enum qseecom_client_handle_type { + QSEECOM_CLIENT_APP = 1, + QSEECOM_LISTENER_SERVICE, + QSEECOM_SECURE_SERVICE, + QSEECOM_GENERIC, + QSEECOM_UNAVAILABLE_CLIENT_APP, +}; + +enum qseecom_ce_hw_instance { + CLK_QSEE = 0, + CLK_CE_DRV, + CLK_INVALID, +}; + +enum qseecom_cache_ops { + QSEECOM_CACHE_CLEAN, + QSEECOM_CACHE_INVALIDATE, +}; + +enum qseecom_listener_unregister_kthread_state { + LSNR_UNREG_KT_SLEEP = 0, + LSNR_UNREG_KT_WAKEUP, +}; + +enum qseecom_unload_app_kthread_state { + UNLOAD_APP_KT_SLEEP = 0, + UNLOAD_APP_KT_WAKEUP, +}; + +static DEFINE_MUTEX(qsee_bw_mutex); +static DEFINE_MUTEX(app_access_lock); +static DEFINE_MUTEX(clk_access_lock); +static DEFINE_MUTEX(listener_access_lock); +static DEFINE_MUTEX(unload_app_pending_list_lock); + + +struct sglist_info { + uint32_t indexAndFlags; + uint32_t sizeOrCount; +}; + +/* + * The 31st bit indicates only one or multiple physical address inside + * the request buffer. 
If it is set, the index locates a single physical addr + * inside the request buffer, and `sizeOrCount` is the size of the memory being + * shared at that physical address. + * Otherwise, the index locates an array of {start, len} pairs (a + * "scatter/gather list"), and `sizeOrCount` gives the number of entries in + * that array. + * + * The 30th bit indicates 64 or 32bit address; when it is set, physical addr + * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values. + * + * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer. + */ +#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \ + ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff))) + +#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD) + +#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/ + +#define MAKE_WHITELIST_VERSION(major, minor, patch) \ + (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF)) + +#define MAKE_NULL(sgt, attach, dmabuf) do {\ + sgt = NULL;\ + attach = NULL;\ + dmabuf = NULL;\ + } while (0) + + +struct qseecom_registered_listener_list { + struct list_head list; + struct qseecom_register_listener_req svc; + void *user_virt_sb_base; + struct dma_buf *dmabuf; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + u8 *sb_virt; + phys_addr_t sb_phys; + size_t sb_length; + wait_queue_head_t rcv_req_wq; + /* rcv_req_flag: 0: ready and empty; 1: received req */ + int rcv_req_flag; + int send_resp_flag; + bool listener_in_use; + /* wq for thread blocked on this listener*/ + wait_queue_head_t listener_block_app_wq; + struct sglist_info *sglistinfo_ptr; + struct qtee_shm sglistinfo_shm; + uint32_t sglist_cnt; + int abort; + bool unregister_pending; +}; + +struct qseecom_unregister_pending_list { + struct list_head list; + struct qseecom_dev_handle *data; +}; + +struct qseecom_registered_app_list { + struct list_head list; + u32 app_id; + u32 ref_cnt; + char app_name[MAX_APP_NAME_SIZE]; + u32 app_arch; + bool app_blocked; + u32 check_block; + u32 blocked_on_listener_id; +}; + +struct qseecom_registered_kclient_list { + struct list_head list; + struct qseecom_handle *handle; +}; + +struct qseecom_ce_info_use { + unsigned char handle[MAX_CE_INFO_HANDLE_SIZE]; + unsigned int unit_num; + unsigned int num_ce_pipe_entries; + struct qseecom_ce_pipe_entry *ce_pipe_entry; + bool alloc; + uint32_t type; +}; + +struct ce_hw_usage_info { + uint32_t qsee_ce_hw_instance; + uint32_t num_fde; + struct qseecom_ce_info_use *fde; + uint32_t num_pfe; + struct qseecom_ce_info_use *pfe; +}; + +struct qseecom_clk { + enum qseecom_ce_hw_instance instance; + struct clk *ce_core_clk; + struct clk *ce_clk; + struct clk *ce_core_src_clk; + struct clk *ce_bus_clk; + uint32_t clk_access_cnt; +}; + +struct qseecom_control { + struct list_head registered_listener_list_head; + + struct list_head registered_app_list_head; + spinlock_t registered_app_list_lock; + + struct list_head registered_kclient_list_head; + spinlock_t registered_kclient_list_lock; + + wait_queue_head_t send_resp_wq; + int send_resp_flag; + + uint32_t qseos_version; + uint32_t qsee_version; + struct device *pdev; /* class_dev */ + struct device *dev; /* platform_dev->dev */ + struct class *driver_class; + dev_t qseecom_device_no; + + bool whitelist_support; + bool commonlib_loaded; + bool commonlib64_loaded; + struct ce_hw_usage_info ce_info; + + int qsee_bw_count; + int qsee_sfpb_bw_count; + + uint32_t qsee_perf_client; + struct icc_path *icc_path; + uint32_t avg_bw; + uint32_t peak_bw; 
+ struct qseecom_clk qsee; + struct qseecom_clk ce_drv; + + bool support_bus_scaling; + bool support_fde; + bool support_pfe; + bool fde_key_size; + uint32_t cumulative_mode; + enum qseecom_bandwidth_request_mode current_mode; + struct timer_list bw_scale_down_timer; + struct work_struct bw_inactive_req_ws; + struct cdev cdev; + bool timer_running; + bool no_clock_support; + unsigned int ce_opp_freq_hz; + bool appsbl_qseecom_support; + uint32_t qsee_reentrancy_support; + bool enable_key_wrap_in_ks; + + uint32_t app_block_ref_cnt; + wait_queue_head_t app_block_wq; + atomic_t qseecom_state; + int is_apps_region_protected; + bool smcinvoke_support; + uint64_t qseecom_bridge_handle; + uint64_t ta_bridge_handle; + uint64_t user_contig_bridge_handle; + + struct list_head unregister_lsnr_pending_list_head; + wait_queue_head_t register_lsnr_pending_wq; + struct task_struct *unregister_lsnr_kthread_task; + wait_queue_head_t unregister_lsnr_kthread_wq; + atomic_t unregister_lsnr_kthread_state; + + struct list_head unload_app_pending_list_head; + struct task_struct *unload_app_kthread_task; + wait_queue_head_t unload_app_kthread_wq; + atomic_t unload_app_kthread_state; +}; + +struct qseecom_unload_app_pending_list { + struct list_head list; + struct qseecom_dev_handle *data; +}; + +struct qseecom_sec_buf_fd_info { + bool is_sec_buf_fd; + size_t size; + void *vbase; + phys_addr_t pbase; + struct qtee_shm shm; +}; + +struct qseecom_param_memref { + uint32_t buffer; + uint32_t size; +}; + +struct qseecom_client_handle { + u32 app_id; + struct dma_buf *dmabuf; + struct dma_buf_attachment *attach; + struct sg_table *sgt; + u8 *sb_virt; + phys_addr_t sb_phys; + size_t sb_length; + unsigned long user_virt_sb_base; + char app_name[MAX_APP_NAME_SIZE]; + u32 app_arch; + struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD]; + bool from_smcinvoke; + struct qtee_shm shm; /* kernel client's shm for req/rsp buf */ + bool unload_pending; + bool from_loadapp; +}; + +struct qseecom_listener_handle { + u32 id; + bool unregister_pending; + bool release_called; +}; + +static struct qseecom_control qseecom; + +struct qseecom_dev_handle { + enum qseecom_client_handle_type type; + union { + struct qseecom_client_handle client; + struct qseecom_listener_handle listener; + }; + bool released; + int abort; + wait_queue_head_t abort_wq; + atomic_t ioctl_count; + bool perf_enabled; + bool fast_load_enabled; + enum qseecom_bandwidth_request_mode mode; + struct sglist_info *sglistinfo_ptr; + struct qtee_shm sglistinfo_shm; + uint32_t sglist_cnt; + bool use_legacy_cmd; +}; + +struct qseecom_key_id_usage_desc { + uint8_t desc[QSEECOM_KEY_ID_SIZE]; +}; + +struct qseecom_crypto_info { + unsigned int unit_num; + unsigned int ce; + unsigned int pipe_pair; +}; + +static struct qseecom_key_id_usage_desc key_id_array[] = { + { + .desc = "Undefined Usage Index", + }, + + { + .desc = "Full Disk Encryption", + }, + + { + .desc = "Per File Encryption", + }, + + { + .desc = "UFS ICE Full Disk Encryption", + }, + + { + .desc = "SDCC ICE Full Disk Encryption", + }, +}; + +/* Function proto types */ +static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t); +static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t); +static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce); +static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce); +static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce); +static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data, + char *cmnlib_name); +static int 
qseecom_enable_ice_setup(int usage); +static int qseecom_disable_ice_setup(int usage); +static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id); +static int qseecom_get_ce_info(struct qseecom_dev_handle *data, + void __user *argp); +static int qseecom_free_ce_info(struct qseecom_dev_handle *data, + void __user *argp); +static int qseecom_query_ce_info(struct qseecom_dev_handle *data, + void __user *argp); +static int __qseecom_unload_app(struct qseecom_dev_handle *data, + uint32_t app_id); + +static int __maybe_unused get_qseecom_keymaster_status(char *str) +{ + get_option(&str, &qseecom.is_apps_region_protected); + return 1; +} +__setup("androidboot.keymaster=", get_qseecom_keymaster_status); + +static int __qseecom_alloc_coherent_buf( + uint32_t size, u8 **vaddr, phys_addr_t *paddr); +static void __qseecom_free_coherent_buf(uint32_t size, + u8 *vaddr, phys_addr_t paddr); + +#define QSEECOM_SCM_EBUSY_WAIT_MS 30 +#define QSEECOM_SCM_EBUSY_MAX_RETRY 67 +#define QSEE_RESULT_FAIL_APP_BUSY 315 + +static int __qseecom_scm_call2_locked(uint32_t smc_id, struct qseecom_scm_desc *desc) +{ + int ret = 0; + int retry_count = 0; + + do { + ret = qcom_scm_qseecom_call(smc_id, desc, false); + if ((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) { + mutex_unlock(&app_access_lock); + msleep(QSEECOM_SCM_EBUSY_WAIT_MS); + mutex_lock(&app_access_lock); + } + if (retry_count == 33) + pr_warn("secure world has been busy for 1 second!\n"); + } while (((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) && + (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY)); + return ret; +} + +static char *__qseecom_alloc_tzbuf(uint32_t size, + phys_addr_t *pa, struct qtee_shm *shm) +{ + char *tzbuf = NULL; + int ret = qtee_shmbridge_allocate_shm(size, shm); + + if (ret) + return NULL; + tzbuf = shm->vaddr; + memset(tzbuf, 0, size); + *pa = shm->paddr; + return tzbuf; +} + +static void __qseecom_free_tzbuf(struct qtee_shm *shm) +{ + qtee_shmbridge_free_shm(shm); +} + +static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id, + const void *req_buf, void *resp_buf) +{ + int ret = 0; + uint32_t smc_id = 0; + uint32_t qseos_cmd_id = 0; + struct qseecom_scm_desc desc = {0}; + struct qseecom_command_scm_resp *scm_resp = NULL; + struct qtee_shm shm = {0}; + phys_addr_t pa; + + if (!req_buf || !resp_buf) { + pr_err("Invalid buffer pointer\n"); + return -EINVAL; + } + qseos_cmd_id = *(uint32_t *)req_buf; + scm_resp = (struct qseecom_command_scm_resp *)resp_buf; + + switch (svc_id) { + case SCM_SVC_INFO: { + if (tz_cmd_id == 3) { + smc_id = TZ_INFO_GET_FEATURE_VERSION_ID; + desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID; + desc.args[0] = *(uint32_t *)req_buf; + } else { + pr_err("Unsupported svc_id %d, tz_cmd_id %d\n", + svc_id, tz_cmd_id); + return -EINVAL; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case SCM_SVC_ES: { + switch (tz_cmd_id) { + case SCM_SAVE_PARTITION_HASH_ID: { + u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH); + struct qseecom_save_partition_hash_req *p_hash_req = + (struct qseecom_save_partition_hash_req *) + req_buf; + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, p_hash_req->digest, + SHA256_DIGEST_LENGTH); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_ES_SAVE_PARTITION_HASH_ID; + desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID; + desc.args[0] = p_hash_req->partition_id; + desc.args[1] = pa; + desc.args[2] = 
SHA256_DIGEST_LENGTH; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + default: { + pr_err("tz_cmd_id %d is not supported\n", tz_cmd_id); + ret = -EINVAL; + break; + } + } /* end of switch (tz_cmd_id) */ + break; + } /* end of case SCM_SVC_ES */ + case SCM_SVC_TZSCHEDULER: { + switch (qseos_cmd_id) { + case QSEOS_APP_START_COMMAND: { + struct qseecom_load_app_ireq *req; + struct qseecom_load_app_64bit_ireq *req_64bit; + + smc_id = TZ_OS_APP_START_ID; + desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_app_ireq *)req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_app_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_APP_SHUTDOWN_COMMAND: { + struct qseecom_unload_app_ireq *req; + + req = (struct qseecom_unload_app_ireq *)req_buf; + smc_id = TZ_OS_APP_SHUTDOWN_ID; + desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID; + desc.args[0] = req->app_id; + ret = qcom_scm_qseecom_call(smc_id, &desc, true); + break; + } + case QSEOS_APP_LOOKUP_COMMAND: { + struct qseecom_check_app_ireq *req; + u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name)); + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + req = (struct qseecom_check_app_ireq *)req_buf; + pr_debug("Lookup app_name = %s\n", req->app_name); + strlcpy(tzbuf, req->app_name, sizeof(req->app_name)); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_OS_APP_LOOKUP_ID; + desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID; + desc.args[0] = pa; + desc.args[1] = strlen(req->app_name); + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + case QSEOS_APP_REGION_NOTIFICATION: { + struct qsee_apps_region_info_ireq *req; + struct qsee_apps_region_info_64bit_ireq *req_64bit; + + smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID; + desc.arginfo = + TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qsee_apps_region_info_ireq *) + req_buf; + desc.args[0] = req->addr; + desc.args[1] = req->size; + } else { + req_64bit = + (struct qsee_apps_region_info_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->addr; + desc.args[1] = req_64bit->size; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_LOAD_SERV_IMAGE_COMMAND: { + struct qseecom_load_lib_image_ireq *req; + struct qseecom_load_lib_image_64bit_ireq *req_64bit; + + smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID; + desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_lib_image_ireq *) + req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_lib_image_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: { + smc_id = 
TZ_OS_UNLOAD_SERVICES_IMAGE_ID; + desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_REGISTER_LISTENER: { + struct qseecom_register_listener_ireq *req; + struct qseecom_register_listener_64bit_ireq *req_64bit; + + desc.arginfo = + TZ_OS_REGISTER_LISTENER_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_register_listener_ireq *) + req_buf; + desc.args[0] = req->listener_id; + desc.args[1] = req->sb_ptr; + desc.args[2] = req->sb_len; + } else { + req_64bit = + (struct qseecom_register_listener_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->listener_id; + desc.args[1] = req_64bit->sb_ptr; + desc.args[2] = req_64bit->sb_len; + } + qseecom.smcinvoke_support = true; + smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + if (ret == -EIO) { + /* smcinvoke is not supported */ + qseecom.smcinvoke_support = false; + smc_id = TZ_OS_REGISTER_LISTENER_ID; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + } + break; + } + case QSEOS_DEREGISTER_LISTENER: { + struct qseecom_unregister_listener_ireq *req; + + req = (struct qseecom_unregister_listener_ireq *) + req_buf; + smc_id = TZ_OS_DEREGISTER_LISTENER_ID; + desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID; + desc.args[0] = req->listener_id; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_LISTENER_DATA_RSP_COMMAND: { + struct qseecom_client_listener_data_irsp *req; + + req = (struct qseecom_client_listener_data_irsp *) + req_buf; + smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID; + desc.arginfo = + TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID; + desc.args[0] = req->listener_id; + desc.args[1] = req->status; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: { + struct qseecom_client_listener_data_irsp *req; + struct qseecom_client_listener_data_64bit_irsp *req_64; + + smc_id = + TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID; + desc.arginfo = + TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = + (struct qseecom_client_listener_data_irsp *) + req_buf; + desc.args[0] = req->listener_id; + desc.args[1] = req->status; + desc.args[2] = req->sglistinfo_ptr; + desc.args[3] = req->sglistinfo_len; + } else { + req_64 = + (struct qseecom_client_listener_data_64bit_irsp *) + req_buf; + desc.args[0] = req_64->listener_id; + desc.args[1] = req_64->status; + desc.args[2] = req_64->sglistinfo_ptr; + desc.args[3] = req_64->sglistinfo_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: { + struct qseecom_load_app_ireq *req; + struct qseecom_load_app_64bit_ireq *req_64bit; + + smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID; + desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_app_ireq *)req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_app_64bit_ireq *)req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: { + smc_id = 
TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID; + desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + + case QSEOS_CLIENT_SEND_DATA_COMMAND: { + struct qseecom_client_send_data_ireq *req; + struct qseecom_client_send_data_64bit_ireq *req_64bit; + + smc_id = TZ_APP_QSAPP_SEND_DATA_ID; + desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_client_send_data_ireq *) + req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->rsp_ptr; + desc.args[4] = req->rsp_len; + } else { + req_64bit = + (struct qseecom_client_send_data_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->rsp_ptr; + desc.args[4] = req_64bit->rsp_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: { + struct qseecom_client_send_data_ireq *req; + struct qseecom_client_send_data_64bit_ireq *req_64bit; + + smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_client_send_data_ireq *) + req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->rsp_ptr; + desc.args[4] = req->rsp_len; + desc.args[5] = req->sglistinfo_ptr; + desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = + (struct qseecom_client_send_data_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->rsp_ptr; + desc.args[4] = req_64bit->rsp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_RPMB_PROVISION_KEY_COMMAND: { + struct qseecom_client_send_service_ireq *req; + + req = (struct qseecom_client_send_service_ireq *) + req_buf; + smc_id = TZ_OS_RPMB_PROVISION_KEY_ID; + desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID; + desc.args[0] = req->key_type; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_RPMB_ERASE_COMMAND: { + smc_id = TZ_OS_RPMB_ERASE_ID; + desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: { + smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID; + desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_DIAG_FUSE_REQ_CMD: + case QSEOS_DIAG_FUSE_REQ_RSP_CMD: { + struct qseecom_client_send_fsm_diag_req *req; + + smc_id = TZ_SECBOOT_GET_FUSE_INFO; + desc.arginfo = TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID; + + req = (struct qseecom_client_send_fsm_diag_req *) req_buf; + desc.args[0] = req->req_ptr; + desc.args[1] = req->req_len; + desc.args[2] = req->rsp_ptr; + desc.args[3] = req->rsp_len; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + + } + case QSEOS_GENERATE_KEY: { + u32 
tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_generate_ireq) - + sizeof(uint32_t)); + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_generate_ireq) - + sizeof(uint32_t))); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_OS_KS_GEN_KEY_ID; + desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID; + desc.args[0] = pa; + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + case QSEOS_DELETE_KEY: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_delete_ireq) - + sizeof(uint32_t)); + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_delete_ireq) - + sizeof(uint32_t))); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_OS_KS_DEL_KEY_ID; + desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID; + desc.args[0] = pa; + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + case QSEOS_SET_KEY: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_select_ireq) - + sizeof(uint32_t)); + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_select_ireq) - + sizeof(uint32_t))); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_OS_KS_SET_PIPE_KEY_ID; + desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID; + desc.args[0] = pa; + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + case QSEOS_UPDATE_KEY_USERINFO: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_userinfo_update_ireq) - + sizeof(uint32_t)); + char *tzbuf = __qseecom_alloc_tzbuf( + tzbuflen, &pa, &shm); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof + (struct qseecom_key_userinfo_update_ireq) - + sizeof(uint32_t))); + qtee_shmbridge_flush_shm_buf(&shm); + smc_id = TZ_OS_KS_UPDATE_KEY_ID; + desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID; + desc.args[0] = pa; + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = __qseecom_scm_call2_locked(smc_id, &desc); + __qseecom_free_tzbuf(&shm); + break; + } + case QSEOS_TEE_OPEN_SESSION: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID; + desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_TEE_OPEN_SESSION_WHITELIST: { + struct qseecom_qteec_ireq 
*req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + desc.args[5] = req->sglistinfo_ptr; + desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_TEE_INVOKE_COMMAND: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID; + desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + desc.args[5] = req->sglistinfo_ptr; + desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_TEE_CLOSE_SESSION: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID; + desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_TEE_REQUEST_CANCELLATION: { + 
struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID; + desc.arginfo = + TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: { + struct qseecom_continue_blocked_request_ireq *req = + (struct qseecom_continue_blocked_request_ireq *) + req_buf; + if (qseecom.smcinvoke_support) + smc_id = + TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID; + else + smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID; + desc.arginfo = + TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID; + desc.args[0] = req->app_or_session_id; + ret = __qseecom_scm_call2_locked(smc_id, &desc); + break; + } + default: { + pr_err("qseos_cmd_id %d is not supported.\n", + qseos_cmd_id); + ret = -EINVAL; + break; + } + } /*end of switch (qsee_cmd_id) */ + break; + } /*end of case SCM_SVC_TZSCHEDULER*/ + default: { + pr_err("svc_id 0x%x is not supported.\n", svc_id); + ret = -EINVAL; + break; + } + } /*end of switch svc_id */ + scm_resp->result = desc.ret[0]; + scm_resp->resp_type = desc.ret[1]; + scm_resp->data = desc.ret[2]; + pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n", + svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo); + pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n", + scm_resp->result, scm_resp->resp_type, scm_resp->data); + return ret; +} + +static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len) +{ + return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf); +} + +static struct qseecom_registered_listener_list *__qseecom_find_svc( + int32_t listener_id) +{ + struct qseecom_registered_listener_list *entry = NULL; + + list_for_each_entry(entry, + &qseecom.registered_listener_list_head, list) { + if (entry->svc.listener_id == listener_id) + break; + } + if ((entry != NULL) && (entry->svc.listener_id != listener_id)) { + pr_debug("Service id: %u is not found\n", listener_id); + return NULL; + } + + return entry; +} + +static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf, + enum qseecom_cache_ops cache_op) +{ + int ret = 0; + + if (!dmabuf) { + pr_err("dmabuf is NULL\n"); + ret = -EINVAL; + goto exit; + } + + switch (cache_op) { + case QSEECOM_CACHE_CLEAN: /* Doing CLEAN and INVALIDATE */ + dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + break; + case QSEECOM_CACHE_INVALIDATE: + dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE); + dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE); + break; + default: + pr_err("cache (%d) operation not supported\n", + cache_op); + ret = -EINVAL; + goto exit; + } +exit: + return ret; +} + +static int qseecom_destroy_bridge_callback(void *dtor_data) +{ + int ret = 0; + uint64_t handle = (uint64_t)dtor_data; + + pr_debug("to destroy shm bridge %lld\n", handle); + ret = qtee_shmbridge_deregister(handle); 
+ if (ret) { + pr_err("failed to destroy shm bridge %lld\n", handle); + return ret; + } + return ret; +} + +static int qseecom_create_bridge_for_secbuf(int ion_fd, struct dma_buf *dmabuf, + struct sg_table *sgt) +{ + int ret = 0; + phys_addr_t phys; + size_t size = 0; + uint64_t handle = 0; + int tz_perm = PERM_READ|PERM_WRITE; + uint32_t *vmid_list; + uint32_t *perms_list; + uint32_t nelems = 0; + struct scatterlist *sg = sgt->sgl; + + if (!qtee_shmbridge_is_enabled()) + return 0; + + phys = sg_phys(sg); + size = sg->length; + + ret = qtee_shmbridge_query(phys); + if (ret) { + pr_debug("bridge exists\n"); + return 0; + } + + if (mem_buf_dma_buf_exclusive_owner(dmabuf) || (sgt->nents != 1)) { + pr_debug("just create bridge for contiguous secure buf\n"); + return 0; + } + + ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list, + (int **)&perms_list, (int *)&nelems); + if (ret) { + pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret); + return ret; + } + + ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems, + tz_perm, &handle); + + if (ret && ret != -EEXIST) { + pr_err("creation of shm bridge failed with ret: %d\n", + ret); + goto exit; + } + + pr_debug("created shm bridge %lld\n", handle); + mem_buf_dma_buf_set_destructor(dmabuf, qseecom_destroy_bridge_callback, + (void *)handle); + +exit: + kfree(perms_list); + kfree(vmid_list); + return ret; +} + +static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt, + struct dma_buf_attachment **attach, + struct dma_buf **dmabuf) +{ + struct dma_buf *new_dma_buf = NULL; + struct dma_buf_attachment *new_attach = NULL; + struct sg_table *new_sgt = NULL; + int ret = 0; + + new_dma_buf = dma_buf_get(ion_fd); + if (IS_ERR_OR_NULL(new_dma_buf)) { + pr_err("dma_buf_get() for ion_fd %d failed\n", ion_fd); + ret = -ENOMEM; + goto err; + } + + new_attach = dma_buf_attach(new_dma_buf, qseecom.dev); + if (IS_ERR_OR_NULL(new_attach)) { + pr_err("dma_buf_attach() for ion_fd %d failed\n", ion_fd); + ret = -ENOMEM; + goto err_put; + } + + new_sgt = dma_buf_map_attachment(new_attach, DMA_BIDIRECTIONAL); + if (IS_ERR_OR_NULL(new_sgt)) { + ret = PTR_ERR(new_sgt); + pr_err("dma_buf_map_attachment for ion_fd %d failed ret = %d\n", + ion_fd, ret); + goto err_detach; + } + + ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt); + if (ret) { + pr_err("failed to create bridge for fd %d\n", ion_fd); + goto err_unmap_attachment; + } + *sgt = new_sgt; + *attach = new_attach; + *dmabuf = new_dma_buf; + return ret; + +err_unmap_attachment: + dma_buf_unmap_attachment(new_attach, new_sgt, DMA_BIDIRECTIONAL); +err_detach: + dma_buf_detach(new_dma_buf, new_attach); +err_put: + dma_buf_put(new_dma_buf); +err: + return ret; +} + +static void qseecom_dmabuf_unmap(struct sg_table *sgt, + struct dma_buf_attachment *attach, + struct dma_buf *dmabuf) +{ + dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); + dma_buf_detach(dmabuf, attach); + dma_buf_put(dmabuf); +} + +/* convert ion_fd to phys_adds and virt_addr*/ +static int qseecom_vaddr_map(int ion_fd, + phys_addr_t *paddr, void **vaddr, + struct sg_table **sgt, + struct dma_buf_attachment **attach, + size_t *sb_length, struct dma_buf **dmabuf) +{ + struct dma_buf *new_dma_buf = NULL; + struct dma_buf_attachment *new_attach = NULL; + struct dma_buf_map new_dma_buf_map = {0}; + struct sg_table *new_sgt = NULL; + void *new_va = NULL; + int ret = 0; + + ret = qseecom_dmabuf_map(ion_fd, &new_sgt, &new_attach, &new_dma_buf); + if (ret) { + pr_err("qseecom_dmabuf_map for ion_fd 
%d failed ret = %d\n", + ion_fd, ret); + goto err; + } + ret = 0; + + *paddr = sg_dma_address(new_sgt->sgl); + *sb_length = new_sgt->sgl->length; + + dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL); + ret = dma_buf_vmap(new_dma_buf, &new_dma_buf_map); + new_va = ret ? NULL : new_dma_buf_map.vaddr; + if (!new_va) { + pr_err("dma_buf_vmap failed\n"); + ret = -ENOMEM; + goto err_unmap; + } + *dmabuf = new_dma_buf; + *attach = new_attach; + *sgt = new_sgt; + *vaddr = new_va; + return ret; + +err_unmap: + dma_buf_end_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL); + qseecom_dmabuf_unmap(new_sgt, new_attach, new_dma_buf); + MAKE_NULL(*sgt, *attach, *dmabuf); +err: + return ret; +} + +static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt, + struct dma_buf_attachment *attach, + struct dma_buf *dmabuf) +{ + struct dma_buf_map dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr); + + if (!dmabuf || !vaddr || !sgt || !attach) + return; + pr_err("SMITA trying to unmap vaddr"); + dma_buf_vunmap(dmabuf, &dmabufmap); + dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL); + qseecom_dmabuf_unmap(sgt, attach, dmabuf); +} + +static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, + struct qseecom_dev_handle *handle, + struct qseecom_register_listener_req *listener) +{ + int ret = 0; + struct qseecom_register_listener_ireq req; + struct qseecom_register_listener_64bit_ireq req_64bit; + struct qseecom_command_scm_resp resp; + void *cmd_buf = NULL; + size_t cmd_len; + + ret = qseecom_vaddr_map(listener->ifd_data_fd, + &svc->sb_phys, (void **)&svc->sb_virt, + &svc->sgt, &svc->attach, + &svc->sb_length, &svc->dmabuf); + if (ret) { + pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n", + listener->ifd_data_fd, svc->svc.listener_id, ret); + return -EINVAL; + } + + if (qseecom.qsee_version < QSEE_VERSION_40) { + req.qsee_cmd_id = QSEOS_REGISTER_LISTENER; + req.listener_id = svc->svc.listener_id; + req.sb_len = svc->sb_length; + req.sb_ptr = (uint32_t)svc->sb_phys; + cmd_buf = (void *)&req; + cmd_len = sizeof(struct qseecom_register_listener_ireq); + } else { + req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER; + req_64bit.listener_id = svc->svc.listener_id; + req_64bit.sb_len = svc->sb_length; + req_64bit.sb_ptr = (uint64_t)svc->sb_phys; + cmd_buf = (void *)&req_64bit; + cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq); + } + + resp.result = QSEOS_RESULT_INCOMPLETE; + + mutex_unlock(&listener_access_lock); + mutex_lock(&app_access_lock); + __qseecom_reentrancy_check_if_no_app_blocked( + TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID); + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + mutex_unlock(&app_access_lock); + mutex_lock(&listener_access_lock); + if (ret) { + pr_err("qseecom_scm_call failed with err: %d\n", ret); + ret = -EINVAL; + goto err; + } + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Error SB registration req: resp.result = %d\n", + resp.result); + ret = -EPERM; + goto err; + } + return 0; +err: + if (svc->dmabuf) { + qseecom_vaddr_unmap(svc->sb_virt, svc->sgt, svc->attach, + svc->dmabuf); + MAKE_NULL(svc->sgt, svc->attach, svc->dmabuf); + } + return ret; +} + +static int qseecom_register_listener(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + struct qseecom_register_listener_req rcvd_lstnr; + struct qseecom_registered_listener_list *new_entry; + struct qseecom_registered_listener_list *ptr_svc; + + ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr)); + if (ret) { + 
pr_err("copy_from_user failed\n"); + return ret; + } + if (!access_ok((void __user *)rcvd_lstnr.virt_sb_base, + rcvd_lstnr.sb_size)) + return -EFAULT; + + ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id); + if (ptr_svc) { + if (!ptr_svc->unregister_pending) { + pr_err("Service %d is not unique\n", + rcvd_lstnr.listener_id); + data->released = true; + return -EBUSY; + } else { + /*wait until listener is unregistered*/ + pr_debug("register %d has to wait\n", + rcvd_lstnr.listener_id); + mutex_unlock(&listener_access_lock); + ret = wait_event_interruptible( + qseecom.register_lsnr_pending_wq, + list_empty( + &qseecom.unregister_lsnr_pending_list_head)); + if (ret) { + pr_err("interrupted register_pending_wq %d\n", + rcvd_lstnr.listener_id); + mutex_lock(&listener_access_lock); + return -ERESTARTSYS; + } + mutex_lock(&listener_access_lock); + } + } + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr)); + new_entry->rcv_req_flag = 0; + + new_entry->sglistinfo_ptr = + (struct sglist_info *)__qseecom_alloc_tzbuf( + sizeof(struct sglist_info) * MAX_ION_FD, + &new_entry->sglistinfo_shm.paddr, + &new_entry->sglistinfo_shm); + if (!new_entry->sglistinfo_ptr) { + kfree(new_entry); + return -ENOMEM; + } + new_entry->svc.listener_id = rcvd_lstnr.listener_id; + new_entry->sb_length = rcvd_lstnr.sb_size; + new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base; + if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) { + pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n", + rcvd_lstnr.listener_id, rcvd_lstnr.sb_size); + __qseecom_free_tzbuf(&new_entry->sglistinfo_shm); + kfree_sensitive(new_entry); + return -ENOMEM; + } + + init_waitqueue_head(&new_entry->rcv_req_wq); + init_waitqueue_head(&new_entry->listener_block_app_wq); + new_entry->send_resp_flag = 0; + new_entry->listener_in_use = false; + list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head); + + data->listener.id = rcvd_lstnr.listener_id; + pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id); + return ret; +} + +static int __qseecom_unregister_listener(struct qseecom_dev_handle *data, + struct qseecom_registered_listener_list *ptr_svc) +{ + int ret = 0; + struct qseecom_register_listener_ireq req; + struct qseecom_command_scm_resp resp; + + req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER; + req.listener_id = data->listener.id; + resp.result = QSEOS_RESULT_INCOMPLETE; + + mutex_unlock(&listener_access_lock); + mutex_lock(&app_access_lock); + __qseecom_reentrancy_check_if_no_app_blocked( + TZ_OS_DEREGISTER_LISTENER_ID); + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(req), &resp, sizeof(resp)); + mutex_unlock(&app_access_lock); + mutex_lock(&listener_access_lock); + if (ret) { + pr_err("scm_call() failed with err: %d (lstnr id=%d)\n", + ret, data->listener.id); + return ret; + } + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Failed resp.result=%d,(lstnr id=%d)\n", + resp.result, data->listener.id); + ret = -EPERM; + goto exit; + } + + while (atomic_read(&data->ioctl_count) > 1) { + if (wait_event_interruptible(data->abort_wq, + atomic_read(&data->ioctl_count) <= 1)) { + pr_err("Interrupted from abort\n"); + ret = -ERESTARTSYS; + } + } + +exit: + if (ptr_svc->dmabuf) { + qseecom_vaddr_unmap(ptr_svc->sb_virt, + ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf); + MAKE_NULL(ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf); + } + __qseecom_free_tzbuf(&ptr_svc->sglistinfo_shm); + 
list_del(&ptr_svc->list); + kfree_sensitive(ptr_svc); + + data->released = true; + pr_debug("Service %d is unregistered\n", data->listener.id); + return ret; +} + +static int qseecom_unregister_listener(struct qseecom_dev_handle *data) +{ + struct qseecom_registered_listener_list *ptr_svc = NULL; + struct qseecom_unregister_pending_list *entry = NULL; + + if (data->released) { + pr_err("Don't unregister lsnr %d\n", data->listener.id); + return -EINVAL; + } + + ptr_svc = __qseecom_find_svc(data->listener.id); + if (!ptr_svc) { + pr_err("Unregiser invalid listener ID %d\n", data->listener.id); + return -ENODATA; + } + /* stop CA thread waiting for listener response */ + ptr_svc->abort = 1; + wake_up_interruptible_all(&qseecom.send_resp_wq); + + /* stop listener thread waiting for listener request */ + data->abort = 1; + wake_up_all(&ptr_svc->rcv_req_wq); + + /* return directly if pending*/ + if (ptr_svc->unregister_pending) + return 0; + + /*add unregistration into pending list*/ + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + entry->data = data; + list_add_tail(&entry->list, + &qseecom.unregister_lsnr_pending_list_head); + ptr_svc->unregister_pending = true; + pr_debug("unregister %d pending\n", data->listener.id); + return 0; +} + +static void __qseecom_processing_pending_lsnr_unregister(void) +{ + struct qseecom_unregister_pending_list *entry = NULL; + struct qseecom_registered_listener_list *ptr_svc = NULL; + struct list_head *pos; + int ret = 0; + + mutex_lock(&listener_access_lock); + while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) { + pos = qseecom.unregister_lsnr_pending_list_head.next; + entry = list_entry(pos, + struct qseecom_unregister_pending_list, list); + if (entry && entry->data) { + pr_debug("process pending unregister %d\n", + entry->data->listener.id); + /* don't process the entry if qseecom_release is not called*/ + if (!entry->data->listener.release_called) { + list_del(pos); + list_add_tail(&entry->list, + &qseecom.unregister_lsnr_pending_list_head); + break; + } + ptr_svc = __qseecom_find_svc( + entry->data->listener.id); + if (ptr_svc) { + ret = __qseecom_unregister_listener( + entry->data, ptr_svc); + if (ret) { + pr_debug("unregister %d pending again\n", + entry->data->listener.id); + mutex_unlock(&listener_access_lock); + return; + } + } else + pr_err("invalid listener %d\n", + entry->data->listener.id); + __qseecom_free_tzbuf(&entry->data->sglistinfo_shm); + kfree_sensitive(entry->data); + } + list_del(pos); + kfree_sensitive(entry); + } + mutex_unlock(&listener_access_lock); + wake_up_interruptible(&qseecom.register_lsnr_pending_wq); +} + +static void __wakeup_unregister_listener_kthread(void) +{ + atomic_set(&qseecom.unregister_lsnr_kthread_state, + LSNR_UNREG_KT_WAKEUP); + wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq); +} + +static int __qseecom_unregister_listener_kthread_func(void *data) +{ + while (!kthread_should_stop()) { + wait_event_interruptible( + qseecom.unregister_lsnr_kthread_wq, + atomic_read(&qseecom.unregister_lsnr_kthread_state) + == LSNR_UNREG_KT_WAKEUP); + pr_debug("kthread to unregister listener is called %d\n", + atomic_read(&qseecom.unregister_lsnr_kthread_state)); + __qseecom_processing_pending_lsnr_unregister(); + atomic_set(&qseecom.unregister_lsnr_kthread_state, + LSNR_UNREG_KT_SLEEP); + } + pr_warn("kthread to unregister listener stopped\n"); + return 0; +} + +static int qseecom_bus_scale_update_request( + int client, int mode) +{ + pr_debug("client %d, mode %d\n", client, 
mode); + /*TODO: get ab/ib from device tree for different mode*/ + if (!mode) + return icc_set_bw(qseecom.icc_path, 0, 0); + else + return icc_set_bw(qseecom.icc_path, + qseecom.avg_bw, qseecom.peak_bw); +} + +static int __qseecom_set_msm_bus_request(uint32_t mode) +{ + int ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + if (qclk->ce_core_src_clk != NULL) { + if (mode == INACTIVE) { + __qseecom_disable_clk(CLK_QSEE); + } else { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + pr_err("CLK enabling failed (%d) MODE (%d)\n", + ret, mode); + } + } + + if ((!ret) && (qseecom.current_mode != mode)) { + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, mode); + if (ret) { + pr_err("Bandwidth req failed(%d) MODE (%d)\n", + ret, mode); + if (qclk->ce_core_src_clk != NULL) { + if (mode == INACTIVE) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + pr_err("CLK enable failed\n"); + } else + __qseecom_disable_clk(CLK_QSEE); + } + } + qseecom.current_mode = mode; + } + return ret; +} + +static void qseecom_bw_inactive_req_work(struct work_struct *work) +{ + mutex_lock(&app_access_lock); + mutex_lock(&qsee_bw_mutex); + if (qseecom.timer_running) + __qseecom_set_msm_bus_request(INACTIVE); + pr_debug("current_mode = %d, cumulative_mode = %d\n", + qseecom.current_mode, qseecom.cumulative_mode); + qseecom.timer_running = false; + mutex_unlock(&qsee_bw_mutex); + mutex_unlock(&app_access_lock); +} + +static void qseecom_scale_bus_bandwidth_timer_callback(struct timer_list *data) +{ + schedule_work(&qseecom.bw_inactive_req_ws); +} + +static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + int ret = 0; + + mutex_lock(&clk_access_lock); + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + if (qclk->clk_access_cnt > 0) { + qclk->clk_access_cnt--; + } else { + pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt); + ret = -EINVAL; + } + + mutex_unlock(&clk_access_lock); + return ret; +} + +static int qseecom_scale_bus_bandwidth_timer(uint32_t mode) +{ + int32_t ret = 0; + int32_t request_mode = INACTIVE; + + mutex_lock(&qsee_bw_mutex); + if (mode == 0) { + if (qseecom.cumulative_mode > MEDIUM) + request_mode = HIGH; + else + request_mode = qseecom.cumulative_mode; + } else { + request_mode = mode; + } + + ret = __qseecom_set_msm_bus_request(request_mode); + if (ret) { + pr_err("set msm bus request failed (%d),request_mode (%d)\n", + ret, request_mode); + goto err_scale_timer; + } + + if (qseecom.timer_running) { + ret = __qseecom_decrease_clk_ref_count(CLK_QSEE); + if (ret) { + pr_err("Failed to decrease clk ref count.\n"); + goto err_scale_timer; + } + del_timer_sync(&(qseecom.bw_scale_down_timer)); + qseecom.timer_running = false; + } +err_scale_timer: + mutex_unlock(&qsee_bw_mutex); + return ret; +} + + +static int qseecom_unregister_bus_bandwidth_needs( + struct qseecom_dev_handle *data) +{ + qseecom.cumulative_mode -= data->mode; + data->mode = INACTIVE; + + return 0; +} + +static int __qseecom_register_bus_bandwidth_needs( + struct qseecom_dev_handle *data, uint32_t request_mode) +{ + if (data->mode == INACTIVE) { + qseecom.cumulative_mode += request_mode; + data->mode = request_mode; + } else { + if (data->mode != request_mode) { + qseecom.cumulative_mode -= data->mode; + qseecom.cumulative_mode += request_mode; + data->mode = request_mode; + } + } + return 0; +} + +static int qseecom_perf_enable(struct qseecom_dev_handle *data) +{ + int ret = 0; + + ret = 
qsee_vote_for_clock(data, CLK_DFAB); + if (ret) { + pr_err("Failed to vote for DFAB clock with err %d\n", ret); + goto perf_enable_exit; + } + ret = qsee_vote_for_clock(data, CLK_SFPB); + if (ret) { + qsee_disable_clock_vote(data, CLK_DFAB); + pr_err("Failed to vote for SFPB clock with err %d\n", ret); + goto perf_enable_exit; + } + +perf_enable_exit: + return ret; +} + +static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data, + void __user *argp) +{ + int32_t ret = 0; + int32_t req_mode; + + if (qseecom.no_clock_support) + return 0; + + ret = copy_from_user(&req_mode, argp, sizeof(req_mode)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + if (req_mode > HIGH) { + pr_err("Invalid bandwidth mode (%d)\n", req_mode); + return -EINVAL; + } + + /* + * Register bus bandwidth needs if bus scaling feature is enabled; + * otherwise, qseecom enable/disable clocks for the client directly. + */ + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, req_mode); + mutex_unlock(&qsee_bw_mutex); + } else { + pr_debug("Bus scaling feature is NOT enabled\n"); + pr_debug("request bandwidth mode %d for the client\n", + req_mode); + if (req_mode != INACTIVE) { + ret = qseecom_perf_enable(data); + if (ret) + pr_err("Failed to vote for clock with err %d\n", + ret); + } else { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + } + return ret; +} + +static void __qseecom_add_bw_scale_down_timer(uint32_t duration) +{ + if (qseecom.no_clock_support) + return; + + mutex_lock(&qsee_bw_mutex); + qseecom.bw_scale_down_timer.expires = jiffies + + msecs_to_jiffies(duration); + mod_timer(&(qseecom.bw_scale_down_timer), + qseecom.bw_scale_down_timer.expires); + qseecom.timer_running = true; + mutex_unlock(&qsee_bw_mutex); +} + +static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data) +{ + if (!qseecom.support_bus_scaling) + qsee_disable_clock_vote(data, CLK_SFPB); + else + __qseecom_add_bw_scale_down_timer( + QSEECOM_LOAD_APP_CRYPTO_TIMEOUT); +} + +static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data) +{ + int ret = 0; + + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(MEDIUM); + if (ret) + pr_err("Failed to set bw MEDIUM.\n"); + } else { + ret = qsee_vote_for_clock(data, CLK_SFPB); + if (ret) + pr_err("Fail vote for clk SFPB ret %d\n", ret); + } + return ret; +} + +static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data, + void __user *argp) +{ + int32_t ret; + struct qseecom_set_sb_mem_param_req req; + size_t len; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&req, (void __user *)argp, sizeof(req))) + return -EFAULT; + + if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) || + (req.sb_len == 0)) { + pr_err("Invalid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n", + req.ifd_data_fd, req.sb_len, req.virt_sb_base); + return -EFAULT; + } + if (!access_ok((void __user *)req.virt_sb_base, + req.sb_len)) + return -EFAULT; + + ret = qseecom_vaddr_map(req.ifd_data_fd, &data->client.sb_phys, + (void **)&data->client.sb_virt, + &data->client.sgt, &data->client.attach, + &len, &data->client.dmabuf); + if (ret) { + pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n", + req.ifd_data_fd, data->client.app_id, ret); + return -EINVAL; + } + + if (len < req.sb_len) { + pr_err("Requested length (0x%x) is > allocated (%zu)\n", + req.sb_len, len); + ret = 
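+ /*
+  * Shared-buffer registration: the client passes an ion fd together
+  * with its userspace virtual base and length.  qseecom_vaddr_map()
+  * pins the dma-buf and returns the kernel mapping and physical
+  * address, which are cached in data->client so later commands can
+  * translate user pointers that fall inside this buffer.  A dma-buf
+  * smaller than the requested sb_len is rejected and unmapped again.
+  */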
-EINVAL; + goto exit; + } + data->client.sb_length = req.sb_len; + data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base; + + return ret; +exit: + if (data->client.dmabuf) { + qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt, + data->client.attach, data->client.dmabuf); + MAKE_NULL(data->client.sgt, + data->client.attach, data->client.dmabuf); + } + return ret; +} + +static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data, + struct qseecom_registered_listener_list *ptr_svc) +{ + int ret; + + ret = (qseecom.send_resp_flag != 0); + return ret || data->abort || ptr_svc->abort; +} + +static int __qseecom_reentrancy_listener_has_sent_rsp( + struct qseecom_dev_handle *data, + struct qseecom_registered_listener_list *ptr_svc) +{ + int ret; + + ret = (ptr_svc->send_resp_flag != 0); + return ret || data->abort || ptr_svc->abort; +} + +static void __qseecom_clean_listener_sglistinfo( + struct qseecom_registered_listener_list *ptr_svc) +{ + if (ptr_svc->sglist_cnt) { + memset(ptr_svc->sglistinfo_ptr, 0, + SGLISTINFO_TABLE_SIZE); + ptr_svc->sglist_cnt = 0; + } +} + +static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, + struct qseecom_command_scm_resp *resp) +{ + int ret = 0; + int rc = 0; + uint32_t lstnr; + struct qseecom_client_listener_data_irsp send_data_rsp = {0}; + struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit + = {0}; + struct qseecom_registered_listener_list *ptr_svc = NULL; + sigset_t new_sigset; + sigset_t old_sigset; + uint32_t status; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = NULL; + + qseecom.app_block_ref_cnt++; + while (resp->result == QSEOS_RESULT_INCOMPLETE) { + lstnr = resp->data; + /* + * Wake up blocking lsitener service with the lstnr id + */ + mutex_lock(&listener_access_lock); + list_for_each_entry(ptr_svc, + &qseecom.registered_listener_list_head, list) { + if (ptr_svc->svc.listener_id == lstnr) { + ptr_svc->listener_in_use = true; + ptr_svc->rcv_req_flag = 1; + ret = qseecom_dmabuf_cache_operations( + ptr_svc->dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + wake_up_interruptible(&ptr_svc->rcv_req_wq); + break; + } + } + + if (ptr_svc == NULL) { + pr_err("Listener Svc %d does not exist\n", lstnr); + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (!ptr_svc->dmabuf) { + pr_err("Client dmabuf is not initialized\n"); + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (ptr_svc->svc.listener_id != lstnr) { + pr_err("Service %d does not exist\n", + lstnr); + rc = -ERESTARTSYS; + ptr_svc = NULL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (ptr_svc->abort == 1) { + pr_debug("Service %d abort %d\n", + lstnr, ptr_svc->abort); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); + + /* initialize the new signal mask with all signals*/ + sigfillset(&new_sigset); + /* block all signals */ + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + mutex_unlock(&listener_access_lock); + do { + /* + * When reentrancy is not supported, check global + * send_resp_flag; otherwise, check this listener's + * send_resp_flag. 
+ */ + if (!qseecom.qsee_reentrancy_support && + !wait_event_interruptible(qseecom.send_resp_wq, + __qseecom_listener_has_sent_rsp( + data, ptr_svc))) { + break; + } + + if (qseecom.qsee_reentrancy_support && + !wait_event_interruptible(qseecom.send_resp_wq, + __qseecom_reentrancy_listener_has_sent_rsp( + data, ptr_svc))) { + break; + } + } while (1); + mutex_lock(&listener_access_lock); + /* restore signal mask */ + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + if (data->abort || ptr_svc->abort) { + pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n", + data->client.app_id, lstnr, ret); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + } else { + status = QSEOS_RESULT_SUCCESS; + } +err_resp: + qseecom.send_resp_flag = 0; + if (ptr_svc) { + ptr_svc->send_resp_flag = 0; + table = ptr_svc->sglistinfo_ptr; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_rsp.listener_id = lstnr; + send_data_rsp.status = status; + if (table) { + send_data_rsp.sglistinfo_ptr = + (uint32_t)virt_to_phys(table); + send_data_rsp.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf( + &ptr_svc->sglistinfo_shm); + } + cmd_buf = (void *)&send_data_rsp; + cmd_len = sizeof(send_data_rsp); + } else { + send_data_rsp_64bit.listener_id = lstnr; + send_data_rsp_64bit.status = status; + if (table) { + send_data_rsp_64bit.sglistinfo_ptr = + virt_to_phys(table); + send_data_rsp_64bit.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf( + &ptr_svc->sglistinfo_shm); + } + cmd_buf = (void *)&send_data_rsp_64bit; + cmd_len = sizeof(send_data_rsp_64bit); + } + if (!qseecom.whitelist_support || table == NULL) + *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND; + else + *(uint32_t *)cmd_buf = + QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST; + + if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + goto exit; + } + + if (ptr_svc) { + ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) + goto exit; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + ptr_svc->listener_in_use = false; + __qseecom_clean_listener_sglistinfo(ptr_svc); + + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + + } else { + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + } + + pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n", + status, resp->result, data->client.app_id, lstnr); + if ((resp->result != QSEOS_RESULT_SUCCESS) && + (resp->result != QSEOS_RESULT_INCOMPLETE)) { + pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", + resp->result, data->client.app_id, lstnr); + ret = -EINVAL; + } +exit: + mutex_unlock(&listener_access_lock); + if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) + __qseecom_disable_clk(CLK_QSEE); + + } + qseecom.app_block_ref_cnt--; + wake_up_interruptible_all(&qseecom.app_block_wq); + if (rc) + return rc; + + return ret; +} + +static int __qseecom_process_reentrancy_blocked_on_listener( + struct qseecom_command_scm_resp *resp, + struct qseecom_registered_app_list *ptr_app, + struct qseecom_dev_handle *data) +{ + struct qseecom_registered_listener_list *list_ptr; + int ret = 0; + struct qseecom_continue_blocked_request_ireq ireq; + struct qseecom_command_scm_resp continue_resp; + unsigned int 
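+ /*
+  * A QSEOS_RESULT_BLOCKED_ON_LISTENER response means the TA session
+  * (or a load/unload request) cannot proceed until the listener named
+  * in resp->data is free.  The caller sleeps here with app_access_lock
+  * released until listener_in_use clears, and is then resumed with
+  * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND.
+  */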
session_id; + sigset_t new_sigset; + sigset_t old_sigset; + unsigned long flags; + bool found_app = false; + struct qseecom_registered_app_list dummy_app_entry = { {NULL} }; + + if (!resp || !data) { + pr_err("invalid resp or data pointer\n"); + ret = -EINVAL; + goto exit; + } + + /* find app_id & img_name from list */ + if (!ptr_app) { + if (data->client.from_smcinvoke || data->client.from_loadapp) { + pr_debug("This request is from %s\n", + (data->client.from_smcinvoke ? "smcinvoke" : "load_app")); + ptr_app = &dummy_app_entry; + ptr_app->app_id = data->client.app_id; + } else { + spin_lock_irqsave(&qseecom.registered_app_list_lock, + flags); + list_for_each_entry(ptr_app, + &qseecom.registered_app_list_head, list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, + data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", + data->client.app_id, + (char *)data->client.app_name); + ret = -ENOENT; + goto exit; + } + } + } + + do { + session_id = resp->resp_type; + mutex_lock(&listener_access_lock); + list_ptr = __qseecom_find_svc(resp->data); + if (!list_ptr) { + pr_err("Invalid listener ID %d\n", resp->data); + ret = -ENODATA; + mutex_unlock(&listener_access_lock); + goto exit; + } + ptr_app->blocked_on_listener_id = resp->data; + + pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n", + resp->data, list_ptr->listener_in_use, + session_id, data->client.app_id); + + /* sleep until listener is available */ + sigfillset(&new_sigset); + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + do { + qseecom.app_block_ref_cnt++; + ptr_app->app_blocked = true; + mutex_unlock(&listener_access_lock); + mutex_unlock(&app_access_lock); + wait_event_interruptible( + list_ptr->listener_block_app_wq, + !list_ptr->listener_in_use); + mutex_lock(&app_access_lock); + mutex_lock(&listener_access_lock); + ptr_app->app_blocked = false; + qseecom.app_block_ref_cnt--; + } while (list_ptr->listener_in_use); + + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + + ptr_app->blocked_on_listener_id = 0; + pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n", + resp->data, session_id, data->client.app_id); + + /* notify TZ that listener is available */ + ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND; + + if (qseecom.smcinvoke_support) + ireq.app_or_session_id = session_id; + else + ireq.app_or_session_id = data->client.app_id; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + &ireq, sizeof(ireq), + &continue_resp, sizeof(continue_resp)); + + if (ret && qseecom.smcinvoke_support) { + /* retry with legacy cmd */ + pr_warn("falling back to legacy method\n"); + qseecom.smcinvoke_support = false; + ireq.app_or_session_id = data->client.app_id; + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + &ireq, sizeof(ireq), + &continue_resp, sizeof(continue_resp)); + qseecom.smcinvoke_support = true; + if (ret) { + pr_err("unblock app %d or session %d fail\n", + data->client.app_id, session_id); + mutex_unlock(&listener_access_lock); + goto exit; + } + } + mutex_unlock(&listener_access_lock); + resp->result = continue_resp.result; + resp->resp_type = continue_resp.resp_type; + resp->data = continue_resp.data; + pr_err("unblock resp = %d\n", resp->result); + } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER); + + if (resp->result != QSEOS_RESULT_INCOMPLETE) { + pr_err("Unexpected unblock resp %d\n", resp->result); + ret = -EINVAL; + } 
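+ /*
+  * Note the fallback above: when smcinvoke support is enabled the
+  * unblock request carries the session id taken from resp->resp_type;
+  * if that call fails, the same request is retried once with the app
+  * id for older firmware before giving up.
+  */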
+exit: + return ret; +} + +static int __qseecom_reentrancy_process_incomplete_cmd( + struct qseecom_dev_handle *data, + struct qseecom_command_scm_resp *resp) +{ + int ret = 0; + int rc = 0; + uint32_t lstnr; + struct qseecom_client_listener_data_irsp send_data_rsp = {0}; + struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit + = {0}; + struct qseecom_registered_listener_list *ptr_svc = NULL; + sigset_t new_sigset; + sigset_t old_sigset; + uint32_t status; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = NULL; + + while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) { + lstnr = resp->data; + /* + * Wake up blocking lsitener service with the lstnr id + */ + mutex_lock(&listener_access_lock); + list_for_each_entry(ptr_svc, + &qseecom.registered_listener_list_head, list) { + if (ptr_svc->svc.listener_id == lstnr) { + ptr_svc->listener_in_use = true; + ptr_svc->rcv_req_flag = 1; + ret = qseecom_dmabuf_cache_operations( + ptr_svc->dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + wake_up_interruptible(&ptr_svc->rcv_req_wq); + break; + } + } + + if (ptr_svc == NULL) { + pr_err("Listener Svc %d does not exist\n", lstnr); + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (!ptr_svc->dmabuf) { + pr_err("Client dmabuf is not initialized\n"); + rc = -EINVAL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (ptr_svc->svc.listener_id != lstnr) { + pr_err("Service %d does not exist\n", + lstnr); + rc = -ERESTARTSYS; + ptr_svc = NULL; + table = NULL; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + if (ptr_svc->abort == 1) { + pr_debug("Service %d abort %d\n", + lstnr, ptr_svc->abort); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + goto err_resp; + } + + pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); + + /* initialize the new signal mask with all signals*/ + sigfillset(&new_sigset); + + /* block all signals */ + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + /* unlock mutex btw waking listener and sleep-wait */ + mutex_unlock(&listener_access_lock); + mutex_unlock(&app_access_lock); + do { + if (!wait_event_interruptible(qseecom.send_resp_wq, + __qseecom_reentrancy_listener_has_sent_rsp( + data, ptr_svc))) { + break; + } + } while (1); + /* lock mutex again after resp sent */ + mutex_lock(&app_access_lock); + mutex_lock(&listener_access_lock); + ptr_svc->send_resp_flag = 0; + qseecom.send_resp_flag = 0; + + /* restore signal mask */ + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + if (data->abort || ptr_svc->abort) { + pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n", + data->client.app_id, lstnr, ret); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + } else { + status = QSEOS_RESULT_SUCCESS; + } +err_resp: + if (ptr_svc) + table = ptr_svc->sglistinfo_ptr; + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_rsp.listener_id = lstnr; + send_data_rsp.status = status; + if (table) { + send_data_rsp.sglistinfo_ptr = + (uint32_t)virt_to_phys(table); + send_data_rsp.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf( + &ptr_svc->sglistinfo_shm); + } + cmd_buf = (void *)&send_data_rsp; + cmd_len = sizeof(send_data_rsp); + } else { + send_data_rsp_64bit.listener_id = lstnr; + send_data_rsp_64bit.status = status; + if (table) { + send_data_rsp_64bit.sglistinfo_ptr = + virt_to_phys(table); + send_data_rsp_64bit.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf( + 
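+ /*
+  * The reentrant listener handshake mirrors the non-reentrant one:
+  * wake the listener named in resp->data, drop both app_access_lock
+  * and listener_access_lock while waiting for its response, then
+  * answer TZ with a 32-bit or 64-bit QSEOS_LISTENER_DATA_RSP_COMMAND
+  * depending on the QSEE version, attaching the flushed sglistinfo
+  * table when whitelist support is in use.
+  */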
&ptr_svc->sglistinfo_shm); + } + cmd_buf = (void *)&send_data_rsp_64bit; + cmd_len = sizeof(send_data_rsp_64bit); + } + if (!qseecom.whitelist_support || table == NULL) + *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND; + else + *(uint32_t *)cmd_buf = + QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST; + + if (lstnr == RPMB_SERVICE) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + goto exit; + } + + if (ptr_svc) { + ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) + goto exit; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + ptr_svc->listener_in_use = false; + __qseecom_clean_listener_sglistinfo(ptr_svc); + wake_up_interruptible(&ptr_svc->listener_block_app_wq); + + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + } else { + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + } + + switch (resp->result) { + case QSEOS_RESULT_BLOCKED_ON_LISTENER: + pr_warn("send lsr %d rsp, but app %d block on lsr %d\n", + lstnr, data->client.app_id, resp->data); + if (lstnr == resp->data) { + pr_err("lstnr %d should not be blocked!\n", + lstnr); + ret = -EINVAL; + goto exit; + } + mutex_unlock(&listener_access_lock); + ret = __qseecom_process_reentrancy_blocked_on_listener( + resp, NULL, data); + mutex_lock(&listener_access_lock); + if (ret) { + pr_err("failed to process App(%d) %s blocked on listener %d\n", + data->client.app_id, + data->client.app_name, resp->data); + goto exit; + } + case QSEOS_RESULT_SUCCESS: + case QSEOS_RESULT_INCOMPLETE: + break; + case QSEOS_RESULT_CBACK_REQUEST: + pr_warn("get cback req app_id = %d, resp->data = %d\n", + data->client.app_id, resp->data); + resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED; + break; + default: + pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", + resp->result, data->client.app_id, lstnr); + ret = -EINVAL; + goto exit; + } +exit: + mutex_unlock(&listener_access_lock); + if (lstnr == RPMB_SERVICE) + __qseecom_disable_clk(CLK_QSEE); + + } + if (rc) + return rc; + + return ret; +} + +/* + * QSEE doesn't support OS level cmds reentrancy until RE phase-3, + * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app. + * So, needs to first check if no app blocked before sending OS level scm call, + * then wait until all apps are unblocked. + */ +static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id) +{ + if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 && + qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 && + IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) { + /* thread sleep until this app unblocked */ + while (qseecom.app_block_ref_cnt > 0) { + mutex_unlock(&app_access_lock); + wait_event_interruptible(qseecom.app_block_wq, + (!qseecom.app_block_ref_cnt)); + mutex_lock(&app_access_lock); + } + } +} + +/* + * scm_call of send data will fail if this TA is blocked or there are more + * than one TA requesting listener services; So, first check to see if need + * to wait. 
+ */ +static void __qseecom_reentrancy_check_if_this_app_blocked( + struct qseecom_registered_app_list *ptr_app) +{ + if (qseecom.qsee_reentrancy_support) { + ptr_app->check_block++; + while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) { + /* thread sleep until this app unblocked */ + mutex_unlock(&app_access_lock); + wait_event_interruptible(qseecom.app_block_wq, + (!ptr_app->app_blocked && + qseecom.app_block_ref_cnt <= 1)); + mutex_lock(&app_access_lock); + } + ptr_app->check_block--; + } +} + +static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req, + uint32_t *app_id) +{ + int32_t ret; + struct qseecom_command_scm_resp resp; + bool found_app = false; + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + + if (!app_id) { + pr_err("Null pointer to app_id\n"); + return -EINVAL; + } + *app_id = 0; + + /* check if app exists and has been registered locally */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list) { + if (!strcmp(entry->app_name, req.app_name)) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (found_app) { + pr_debug("Found app with id %d\n", entry->app_id); + *app_id = entry->app_id; + return 0; + } + + memset((void *)&resp, 0, sizeof(resp)); + + /* SCM_CALL to check if app_id for the mentioned app exists */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_check_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to check if app is already loaded failed\n"); + return -EINVAL; + } + + if (resp.result == QSEOS_RESULT_FAILURE) + return 0; + + switch (resp.resp_type) { + /*qsee returned listener type response */ + case QSEOS_LISTENER_ID: + pr_err("resp type is of listener type instead of app\n"); + return -EINVAL; + case QSEOS_APP_ID: + *app_id = resp.data; + return 0; + default: + pr_err("invalid resp type (%d) from qsee\n", + resp.resp_type); + return -ENODEV; + } +} + +static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) +{ + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + u32 app_id = 0; + struct qseecom_load_img_req load_img_req; + int32_t ret = 0; + phys_addr_t pa = 0; + void *vaddr = NULL; + struct dma_buf_attachment *attach = NULL; + struct dma_buf *dmabuf = NULL; + struct sg_table *sgt = NULL; + + size_t len; + struct qseecom_command_scm_resp resp; + struct qseecom_check_app_ireq req; + struct qseecom_load_app_ireq load_req; + struct qseecom_load_app_64bit_ireq load_req_64bit; + void *cmd_buf = NULL; + size_t cmd_len; + bool first_time = false; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&load_img_req, + (void __user *)argp, + sizeof(struct qseecom_load_img_req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + /* Check and load cmnlib */ + if (qseecom.qsee_version > QSEEE_VERSION_00) { + if (!qseecom.commonlib_loaded && + load_img_req.app_arch == ELFCLASS32) { + ret = qseecom_load_commonlib_image(data, "cmnlib"); + if (ret) { + pr_err("failed to load cmnlib\n"); + return -EIO; + } + qseecom.commonlib_loaded = true; + pr_debug("cmnlib is loaded\n"); + } + + if (!qseecom.commonlib64_loaded && + load_img_req.app_arch == ELFCLASS64) { + ret = qseecom_load_commonlib_image(data, "cmnlib64"); + if (ret) { + pr_err("failed to load cmnlib64\n"); + return -EIO; + } + qseecom.commonlib64_loaded = true; + 
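+ /*
+  * The common libraries are loaded on demand and only once: cmnlib
+  * before the first 32-bit TA and cmnlib64 before the first 64-bit
+  * TA, tracked by the commonlib_loaded / commonlib64_loaded flags.
+  */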
pr_debug("cmnlib64 is loaded\n"); + } + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) + return ret; + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) + goto enable_clk_err; + + req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0'; + strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE); + + ret = __qseecom_check_app_exists(req, &app_id); + if (ret < 0) + goto checkapp_err; + + if (app_id) { + pr_debug("App id %d (%s) already exists\n", app_id, + (char *)(req.app_name)); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + if (entry->ref_cnt == U32_MAX) { + pr_err("App %d (%s) ref_cnt overflow\n", + app_id, req.app_name); + ret = -EINVAL; + goto loadapp_err; + } + entry->ref_cnt++; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + ret = 0; + } else { + first_time = true; + pr_warn("App (%s) does'nt exist, loading apps for first time\n", + (char *)(load_img_req.img_name)); + + ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, + &pa, &vaddr, &sgt, &attach, &len, &dmabuf); + if (ret) { + pr_err("Ion client could not retrieve the handle\n"); + ret = -ENOMEM; + goto loadapp_err; + } + + if (load_img_req.mdt_len > len || load_img_req.img_len > len) { + pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n", + len, load_img_req.mdt_len, + load_img_req.img_len); + ret = -EINVAL; + goto loadapp_err; + } + /* Populate the structure for sending scm call to load image */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req.mdt_len = load_img_req.mdt_len; + load_req.img_len = load_img_req.img_len; + strlcpy(load_req.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req_64bit.mdt_len = load_img_req.mdt_len; + load_req_64bit.img_len = load_img_req.img_len; + strlcpy(load_req_64bit.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto loadapp_err; + } + + /* SCM_CALL to load the app and get the app_id back */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, + cmd_len, &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load app failed\n"); + ret = -EINVAL; + goto loadapp_err; + } + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto loadapp_err; + } + + do { + if (resp.result == QSEOS_RESULT_FAILURE) { + pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n"); + ret = -EFAULT; + goto loadapp_err; + } + + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + /* TZ has created app_id, need to unload it */ + pr_err("incomp_cmd err %d, %d, unload %d %s\n", + ret, resp.result, resp.data, + load_img_req.img_name); + __qseecom_unload_app(data, 
resp.data); + ret = -EFAULT; + goto loadapp_err; + } + } + + if (resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) { + pr_err("load app blocked on listener\n"); + data->client.app_id = resp.result; + data->client.from_loadapp = true; + ret = __qseecom_process_reentrancy_blocked_on_listener(&resp, + NULL, data); + if (ret) { + pr_err("load app fail proc block on listener,ret :%d\n", + ret); + ret = -EFAULT; + goto loadapp_err; + } + } + + } while ((resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) || + (resp.result == QSEOS_RESULT_INCOMPLETE)); + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("scm_call failed resp.result unknown, %d\n", + resp.result); + ret = -EFAULT; + goto loadapp_err; + } + + app_id = resp.data; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto loadapp_err; + } + entry->app_id = app_id; + entry->ref_cnt = 1; + entry->app_arch = load_img_req.app_arch; + /* + * keymaster app may be first loaded as "keymaste" by qseecomd, + * and then used as "keymaster" on some targets. To avoid app + * name checking error, register "keymaster" into app_list and + * thread private data. + */ + if (!strcmp(load_img_req.img_name, "keymaste")) + strlcpy(entry->app_name, "keymaster", + MAX_APP_NAME_SIZE); + else + strlcpy(entry->app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + entry->check_block = 0; + + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_add_tail(&entry->list, &qseecom.registered_app_list_head); + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + + pr_warn("App with id %u (%s) now loaded\n", app_id, + (char *)(load_img_req.img_name)); + } + data->client.app_id = app_id; + data->client.app_arch = load_img_req.app_arch; + if (!strcmp(load_img_req.img_name, "keymaste")) + strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE); + else + strlcpy(data->client.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_img_req.app_id = app_id; + if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + if (first_time) { + spin_lock_irqsave( + &qseecom.registered_app_list_lock, flags); + list_del(&entry->list); + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + kfree_sensitive(entry); + } + } + +loadapp_err: + if (dmabuf) { + qseecom_vaddr_unmap(vaddr, sgt, attach, dmabuf); + MAKE_NULL(sgt, attach, dmabuf); + } +checkapp_err: + __qseecom_disable_clk_scale_down(data); +enable_clk_err: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + return ret; +} + +static int __qseecom_cleanup_app(struct qseecom_dev_handle *data) +{ + int ret = 0; /* Set unload app */ + + wake_up_all(&qseecom.send_resp_wq); + if (qseecom.qsee_reentrancy_support) + mutex_unlock(&app_access_lock); + while (atomic_read(&data->ioctl_count) > 1) { + if (wait_event_interruptible(data->abort_wq, + atomic_read(&data->ioctl_count) <= 1)) { + pr_err("Interrupted from abort\n"); + ret = -ERESTARTSYS; + break; + } + } + if (qseecom.qsee_reentrancy_support) + mutex_lock(&app_access_lock); + return ret; +} + +static int __qseecom_unload_app(struct qseecom_dev_handle *data, + uint32_t app_id) +{ + struct qseecom_unload_app_ireq req; + struct qseecom_command_scm_resp resp; + int ret = 0; + + /* Populate the structure for sending scm call to load image */ + req.qsee_cmd_id = 
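+ /*
+  * Unloading issues QSEOS_APP_SHUTDOWN_COMMAND for the given app_id
+  * and then loops on the response: INCOMPLETE goes back through the
+  * listener machinery, BLOCKED_ON_LISTENER through the reentrancy
+  * handler, and FAILURE or an unknown result is reported as -EFAULT.
+  */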
QSEOS_APP_SHUTDOWN_COMMAND; + req.app_id = app_id; + /* SCM_CALL to unload the app */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_unload_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload app (id = %d) failed ret: %d\n", + app_id, ret); + return ret; + } + + do { + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + pr_warn("App (%d) is unloaded\n", app_id); + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n", + app_id, ret, resp.result, resp.data); + else + pr_warn("App (%d) is unloaded\n", app_id); + break; + case QSEOS_RESULT_FAILURE: + pr_err("app (%d) unload_failed!!\n", app_id); + ret = -EFAULT; + break; + case QSEOS_RESULT_BLOCKED_ON_LISTENER: + pr_err("unload app (%d) blocked on listener\n", app_id); + ret = __qseecom_process_reentrancy_blocked_on_listener(&resp, NULL, data); + if (ret) { + pr_err("unload app fail proc block on listener cmd,ret :%d\n", + ret); + ret = -EFAULT; + } + break; + default: + pr_err("unload app %d get unknown resp.result %d\n", + app_id, resp.result); + ret = -EFAULT; + break; + } + } while ((resp.result == QSEOS_RESULT_INCOMPLETE) || + (resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER)); + return ret; +} +static int qseecom_unload_app(struct qseecom_dev_handle *data, + bool app_crash) +{ + unsigned long flags; + int ret = 0; + struct qseecom_registered_app_list *ptr_app = NULL; + bool found_app = false; + + if (!data) { + pr_err("Invalid/uninitialized device handle\n"); + return -EINVAL; + } + + pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id, + data->client.app_name, app_crash); + + if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) { + pr_debug("Do not unload keymaster app from tz\n"); + goto unload_exit; + } + + ret = __qseecom_cleanup_app(data); + if (ret && !app_crash) { + pr_err("cleanup app failed, pending ioctl:%d\n", data->ioctl_count); + return ret; + } + + __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID); + + /* ignore app_id 0, it happens when close qseecom_fd if load app fail*/ + if (!data->client.app_id) + goto unload_exit; + + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + pr_debug("unload app %d (%s), ref_cnt %d\n", + ptr_app->app_id, ptr_app->app_name, + ptr_app->ref_cnt); + ptr_app->ref_cnt--; + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + if (!found_app) { + pr_err("Cannot find app with id = %d (%s)\n", + data->client.app_id, data->client.app_name); + ret = -EINVAL; + goto unload_exit; + } + + if (!ptr_app->ref_cnt) { + ret = __qseecom_unload_app(data, data->client.app_id); + if (ret == -EBUSY) { + /* + * If unload failed due to EBUSY, don't free mem + * just restore app ref_cnt and return -EBUSY + */ + pr_warn("unload ta %d(%s) EBUSY\n", + data->client.app_id, data->client.app_name); + ptr_app->ref_cnt++; + return ret; + } + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_del(&ptr_app->list); + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + kfree_sensitive(ptr_app); + } + +unload_exit: + if (data->client.dmabuf) { + qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt, + data->client.attach, 
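+ /*
+  * The handle only shuts the TA down when its reference count reaches
+  * zero; -EBUSY from TZ restores the count so the app stays
+  * registered, and keymaster is deliberately never unloaded.  The
+  * client's shared-buffer mapping is torn down on release regardless.
+  */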
data->client.dmabuf); + MAKE_NULL(data->client.sgt, + data->client.attach, data->client.dmabuf); + } + data->released = true; + return ret; +} + +static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data) +{ + struct qseecom_unload_app_pending_list *entry = NULL; + + pr_debug("prepare to unload app(%d)(%s), pending %d\n", + data->client.app_id, data->client.app_name, + data->client.unload_pending); + if (data->client.unload_pending) + return 0; + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + entry->data = data; + list_add_tail(&entry->list, + &qseecom.unload_app_pending_list_head); + data->client.unload_pending = true; + pr_debug("unload ta %d pending\n", data->client.app_id); + return 0; +} + +static void __wakeup_unload_app_kthread(void) +{ + atomic_set(&qseecom.unload_app_kthread_state, + UNLOAD_APP_KT_WAKEUP); + wake_up_interruptible(&qseecom.unload_app_kthread_wq); +} + +static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name) +{ + struct qseecom_unload_app_pending_list *entry = NULL; + bool found = false; + + mutex_lock(&unload_app_pending_list_lock); + list_for_each_entry(entry, &qseecom.unload_app_pending_list_head, + list) { + if ((entry->data->client.app_id == app_id) && + (!strcmp(entry->data->client.app_name, app_name))) { + found = true; + break; + } + } + mutex_unlock(&unload_app_pending_list_lock); + return found; +} + +static void __qseecom_processing_pending_unload_app(void) +{ + struct qseecom_unload_app_pending_list *entry = NULL; + struct list_head *pos; + int ret = 0; + + mutex_lock(&unload_app_pending_list_lock); + while (!list_empty(&qseecom.unload_app_pending_list_head)) { + pos = qseecom.unload_app_pending_list_head.next; + entry = list_entry(pos, + struct qseecom_unload_app_pending_list, list); + if (entry && entry->data) { + pr_debug("process pending unload app %d (%s)\n", + entry->data->client.app_id, + entry->data->client.app_name); + mutex_unlock(&unload_app_pending_list_lock); + mutex_lock(&app_access_lock); + ret = qseecom_unload_app(entry->data, true); + if (ret) + pr_err("unload app %d pending failed %d\n", + entry->data->client.app_id, ret); + mutex_unlock(&app_access_lock); + mutex_lock(&unload_app_pending_list_lock); + __qseecom_free_tzbuf(&entry->data->sglistinfo_shm); + kfree_sensitive(entry->data); + } + list_del(pos); + kfree_sensitive(entry); + } + mutex_unlock(&unload_app_pending_list_lock); +} + +static int __qseecom_unload_app_kthread_func(void *data) +{ + while (!kthread_should_stop()) { + wait_event_interruptible( + qseecom.unload_app_kthread_wq, + atomic_read(&qseecom.unload_app_kthread_state) + == UNLOAD_APP_KT_WAKEUP); + pr_debug("kthread to unload app is called, state %d\n", + atomic_read(&qseecom.unload_app_kthread_state)); + __qseecom_processing_pending_unload_app(); + atomic_set(&qseecom.unload_app_kthread_state, + UNLOAD_APP_KT_SLEEP); + } + pr_warn("kthread to unload app stopped\n"); + return 0; +} + +static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data, + unsigned long virt) +{ + return data->client.sb_phys + (virt - data->client.user_virt_sb_base); +} + +static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data, + unsigned long virt) +{ + return (uintptr_t)data->client.sb_virt + + (virt - data->client.user_virt_sb_base); +} + +static int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr, + struct qseecom_send_svc_cmd_req *req_ptr, + struct qseecom_client_send_service_ireq *send_svc_ireq_ptr) +{ + int ret = 0; + 
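+ /*
+  * RPMB service commands reuse the registered shared buffer: the
+  * request must start at the buffer's base, and the response pointer
+  * handed to TZ is the user address rebased onto the buffer's
+  * physical address via __qseecom_uvirt_to_kphys().
+  */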
void *req_buf = NULL; + + if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) { + pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n", + req_ptr, send_svc_ireq_ptr); + return -EINVAL; + } + + /* Clients need to ensure req_buf is at base offset of shared buffer */ + if ((uintptr_t)req_ptr->cmd_req_buf != + data_ptr->client.user_virt_sb_base) { + pr_err("cmd buf not pointing to base offset of shared buffer\n"); + return -EINVAL; + } + + if (data_ptr->client.sb_length < + sizeof(struct qseecom_rpmb_provision_key)) { + pr_err("shared buffer is too small to hold key type\n"); + return -EINVAL; + } + req_buf = data_ptr->client.sb_virt; + + send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id; + send_svc_ireq_ptr->key_type = + ((struct qseecom_rpmb_provision_key *)req_buf)->key_type; + send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len; + send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->resp_buf)); + send_svc_ireq_ptr->rsp_len = req_ptr->resp_len; + + return ret; +} + +static int __qseecom_process_fsm_key_svc_cmd( + struct qseecom_dev_handle *data_ptr, + struct qseecom_send_svc_cmd_req *req_ptr, + struct qseecom_client_send_fsm_diag_req *send_svc_ireq_ptr) +{ + int ret = 0; + uint32_t reqd_len_sb_in = 0; + + if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) { + pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n", + req_ptr, send_svc_ireq_ptr); + return -EINVAL; + } + + reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len; + if (reqd_len_sb_in > data_ptr->client.sb_length) { + pr_err("Not enough memory to fit cmd_buf and resp_buf.\n"); + pr_err("Required: %u, Available: %zu\n", + reqd_len_sb_in, data_ptr->client.sb_length); + return -ENOMEM; + } + send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id; + send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len; + send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->resp_buf)); + send_svc_ireq_ptr->rsp_len = req_ptr->resp_len; + + send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->cmd_req_buf)); + + + return ret; +} + +static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_svc_cmd_req *req) +{ + if (!req || !req->resp_buf || !req->cmd_req_buf) { + pr_err("req or cmd buffer or response buffer is null\n"); + return -EINVAL; + } + + if (!data || !data->client.sb_virt) { + pr_err("Client or client buf is not initialized\n"); + return -EINVAL; + } + + if (data->client.sb_virt == NULL) { + pr_err("sb_virt null\n"); + return -EINVAL; + } + + if (data->client.user_virt_sb_base == 0) { + pr_err("user_virt_sb_base is null\n"); + return -EINVAL; + } + + if (data->client.sb_length == 0) { + pr_err("sb_length is 0\n"); + return -EINVAL; + } + + if (((uintptr_t)req->cmd_req_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->cmd_req_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + if (((uintptr_t)req->resp_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + if ((req->cmd_req_len == 0) || (req->resp_len == 0) || + (req->cmd_req_len > data->client.sb_length) || + (req->resp_len > data->client.sb_length)) { + pr_err("cmd buf length or response buf length not valid\n"); + return 
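+ /*
+  * Every service command is validated against the registered shared
+  * buffer: both request and response buffers must lie entirely inside
+  * it, their lengths must be non-zero and fit within sb_length, and
+  * the pointer/length arithmetic is checked for integer overflow
+  * before any address translation takes place.
+  */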
-EINVAL; + } + if (req->cmd_req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + return -EINVAL; + } + + if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. Required: %u, Available: %zu\n", + (req->cmd_req_len + req->resp_len), + data->client.sb_length); + return -ENOMEM; + } + if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) { + pr_err("Integer overflow in req_len & cmd_req_buf\n"); + return -EINVAL; + } + if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_buf + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +static int qseecom_send_service_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + struct qseecom_client_send_service_ireq send_svc_ireq; + struct qseecom_client_send_fsm_diag_req send_fsm_diag_svc_ireq; + struct qseecom_command_scm_resp resp; + struct qseecom_send_svc_cmd_req req; + void *send_req_ptr; + size_t req_buf_size; + + /*struct qseecom_command_scm_resp resp;*/ + + if (copy_from_user(&req, + (void __user *)argp, + sizeof(req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + if (__validate_send_service_cmd_inputs(data, &req)) + return -EINVAL; + + data->type = QSEECOM_SECURE_SERVICE; + + switch (req.cmd_id) { + case QSEOS_RPMB_PROVISION_KEY_COMMAND: + case QSEOS_RPMB_ERASE_COMMAND: + case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: + send_req_ptr = &send_svc_ireq; + req_buf_size = sizeof(send_svc_ireq); + if (__qseecom_process_rpmb_svc_cmd(data, &req, + send_req_ptr)) + return -EINVAL; + break; + case QSEOS_FSM_LTEOTA_REQ_CMD: + case QSEOS_FSM_LTEOTA_REQ_RSP_CMD: + case QSEOS_FSM_IKE_REQ_CMD: + case QSEOS_FSM_IKE_REQ_RSP_CMD: + case QSEOS_FSM_OEM_FUSE_WRITE_ROW: + case QSEOS_FSM_OEM_FUSE_READ_ROW: + case QSEOS_FSM_ENCFS_REQ_CMD: + case QSEOS_FSM_ENCFS_REQ_RSP_CMD: + case QSEOS_DIAG_FUSE_REQ_CMD: + case QSEOS_DIAG_FUSE_REQ_RSP_CMD: + + send_req_ptr = &send_fsm_diag_svc_ireq; + req_buf_size = sizeof(send_fsm_diag_svc_ireq); + if (__qseecom_process_fsm_key_svc_cmd(data, &req, + send_req_ptr)) + return -EINVAL; + break; + default: + pr_err("Unsupported cmd_id %d\n", req.cmd_id); + return -EINVAL; + } + + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(HIGH); + if (ret) { + pr_err("Fail to set bw HIGH\n"); + return ret; + } + } else { + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clocks with err %d\n", ret); + return ret; + } + } + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + (const void *)send_req_ptr, + req_buf_size, &resp, sizeof(resp)); + + if (ret) { + pr_err("qseecom_scm_call failed with err: %d\n", ret); + goto exit; + } + + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + 
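+ /*
+  * After the SCM call the shared buffer is invalidated so the CPU
+  * sees TZ's response.  INCOMPLETE results are completed through the
+  * listener path, and for RPMB provisioning-status queries the raw
+  * result code is also copied back into the caller's response buffer.
+  */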
QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + pr_debug("qseos_result_incomplete\n"); + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd fail with result: %d\n", + resp.result); + } + if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) { + pr_warn("RPMB key status is 0x%x\n", resp.result); + if (put_user(resp.result, + (uint32_t __user *)req.resp_buf)) { + ret = -EINVAL; + goto exit; + } + ret = 0; + } + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed with resp.result: %d\n", resp.result); + ret = -EINVAL; + break; + default: + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + break; + } + +exit: + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } else { + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + } + return ret; +} + +static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_cmd_req *req) + +{ + if (!data || !data->client.sb_virt) { + pr_err("Client or client buf is not initialized\n"); + return -EINVAL; + } + if (((req->resp_buf == NULL) && (req->resp_len != 0)) || + (req->cmd_req_buf == NULL)) { + pr_err("cmd buffer or response buffer is null\n"); + return -EINVAL; + } + if (((uintptr_t)req->cmd_req_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->cmd_req_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + if (((uintptr_t)req->resp_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + if ((req->cmd_req_len == 0) || + (req->cmd_req_len > data->client.sb_length) || + (req->resp_len > data->client.sb_length)) { + pr_err("cmd buf length or response buf length not valid\n"); + return -EINVAL; + } + if (req->cmd_req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + return -EINVAL; + } + + if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. 
Required: %u, Available: %zu\n", + (req->cmd_req_len + req->resp_len), + data->client.sb_length); + return -ENOMEM; + } + if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) { + pr_err("Integer overflow in req_len & cmd_req_buf\n"); + return -EINVAL; + } + if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_buf + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp, + struct qseecom_registered_app_list *ptr_app, + struct qseecom_dev_handle *data) +{ + int ret = 0; + + switch (resp->result) { + case QSEOS_RESULT_BLOCKED_ON_LISTENER: + pr_warn("App(%d) %s is blocked on listener %d\n", + data->client.app_id, data->client.app_name, + resp->data); + ret = __qseecom_process_reentrancy_blocked_on_listener( + resp, ptr_app, data); + if (ret) { + pr_err("failed to process App(%d) %s is blocked on listener %d\n", + data->client.app_id, data->client.app_name, resp->data); + return ret; + } + /* fall through to process incomplete request */ + case QSEOS_RESULT_INCOMPLETE: + qseecom.app_block_ref_cnt++; + ptr_app->app_blocked = true; + ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp); + ptr_app->app_blocked = false; + qseecom.app_block_ref_cnt--; + wake_up_interruptible_all(&qseecom.app_block_wq); + if (ret) + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + return ret; + case QSEOS_RESULT_SUCCESS: + return ret; + default: + pr_err("Response result %d not supported\n", + resp->result); + return -EINVAL; + } +} + +static int __qseecom_send_cmd(struct qseecom_dev_handle *data, + struct qseecom_send_cmd_req *req, + bool is_phys_adr) +{ + int ret = 0; + u32 reqd_len_sb_in = 0; + struct qseecom_client_send_data_ireq send_data_req = {0}; + struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0}; + struct qseecom_command_scm_resp resp; + unsigned long flags; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + void *cmd_buf = NULL; + size_t cmd_len; + + reqd_len_sb_in = req->cmd_req_len + req->resp_len; + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + + if (__qseecom_find_pending_unload_app(data->client.app_id, + data->client.app_name)) { + pr_err("app %d (%s) unload is pending\n", + data->client.app_id, data->client.app_name); + return -ENOENT; + } + + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_req.app_id = data->client.app_id; + + if (!is_phys_adr) { + send_data_req.req_ptr = + (uint32_t)(__qseecom_uvirt_to_kphys + 
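+ /*
+  * User-space request/response pointers are converted to physical
+  * addresses inside the shared buffer (unless the caller already
+  * supplied physical addresses), packed into the 32-bit or 64-bit
+  * send_data ireq according to the QSEE version, and for 32-bit TAs
+  * the resulting addresses are additionally required to sit below 4G.
+  */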
(data, (uintptr_t)req->cmd_req_buf)); + send_data_req.rsp_ptr = + (uint32_t)(__qseecom_uvirt_to_kphys( + data, (uintptr_t)req->resp_buf)); + } else { + send_data_req.req_ptr = (uint32_t)(uintptr_t)req->cmd_req_buf; + send_data_req.rsp_ptr = (uint32_t)(uintptr_t)req->resp_buf; + } + + send_data_req.req_len = req->cmd_req_len; + send_data_req.rsp_len = req->resp_len; + send_data_req.sglistinfo_ptr = + (uint32_t)data->sglistinfo_shm.paddr; + send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + cmd_buf = (void *)&send_data_req; + cmd_len = sizeof(struct qseecom_client_send_data_ireq); + } else { + send_data_req_64bit.app_id = data->client.app_id; + + if (!is_phys_adr) { + send_data_req_64bit.req_ptr = + __qseecom_uvirt_to_kphys(data, + (uintptr_t)req->cmd_req_buf); + send_data_req_64bit.rsp_ptr = + __qseecom_uvirt_to_kphys(data, + (uintptr_t)req->resp_buf); + } else { + send_data_req_64bit.req_ptr = + (uintptr_t)req->cmd_req_buf; + send_data_req_64bit.rsp_ptr = + (uintptr_t)req->resp_buf; + } + send_data_req_64bit.req_len = req->cmd_req_len; + send_data_req_64bit.rsp_len = req->resp_len; + /* check if 32bit app's phys_addr region is under 4GB.*/ + if ((data->client.app_arch == ELFCLASS32) && + ((send_data_req_64bit.req_ptr >= + PHY_ADDR_4G - send_data_req_64bit.req_len) || + (send_data_req_64bit.rsp_ptr >= + PHY_ADDR_4G - send_data_req_64bit.rsp_len))){ + pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n", + data->client.app_name, + send_data_req_64bit.req_ptr, + send_data_req_64bit.req_len, + send_data_req_64bit.rsp_ptr, + send_data_req_64bit.rsp_len); + return -EFAULT; + } + send_data_req_64bit.sglistinfo_ptr = + (uint64_t)data->sglistinfo_shm.paddr; + send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + cmd_buf = (void *)&send_data_req_64bit; + cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq); + } + + if (!qseecom.whitelist_support || data->use_legacy_cmd) + *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND; + else + *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST; + + if (data->client.dmabuf) { + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + goto exit; + } + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + goto exit; + } + } + } + + if (data->client.dmabuf) { + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit; + } + } +exit: + return ret; +} + +static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp) +{ + int ret = 0; + struct qseecom_send_cmd_req req; + + ret = copy_from_user(&req, argp, 
sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (__validate_send_cmd_inputs(data, &req)) + return -EINVAL; + + ret = __qseecom_send_cmd(data, &req, false); + + return ret; +} + +static int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req, + struct qseecom_send_modfd_listener_resp *lstnr_resp, + struct qseecom_dev_handle *data, int i, size_t size) +{ + char *curr_field = NULL; + char *temp_field = NULL; + int j = 0; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + if ((req->cmd_req_len < size) || + (req->ifd_data[i].cmd_buf_offset > + req->cmd_req_len - size)) { + pr_err("Invalid offset (req len) 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + + curr_field = (char *) (req->cmd_req_buf + + req->ifd_data[i].cmd_buf_offset); + for (j = 0; j < MAX_ION_FD; j++) { + if ((req->ifd_data[j].fd > 0) && i != j) { + temp_field = (char *) (req->cmd_req_buf + + req->ifd_data[j].cmd_buf_offset); + if (temp_field >= curr_field && temp_field < + (curr_field + size)) { + pr_err("Invalid field offset 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + } + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + if ((lstnr_resp->resp_len < size) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + lstnr_resp->resp_len - size)) { + pr_err("Invalid offset (lstnr resp len) 0x%x\n", + lstnr_resp->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + + curr_field = (char *) (lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[i].cmd_buf_offset); + for (j = 0; j < MAX_ION_FD; j++) { + if ((lstnr_resp->ifd_data[j].fd > 0) && i != j) { + temp_field = (char *) lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[j].cmd_buf_offset; + if (temp_field >= curr_field && temp_field < + (curr_field + size)) { + pr_err("Invalid lstnr field offset 0x%x\n", + lstnr_resp->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + } + } + return 0; +} + +static int __qseecom_update_cmd_buf(void *msg, bool cleanup, + struct qseecom_dev_handle *data) +{ + char *field; + int ret = 0; + int i = 0; + uint32_t len = 0; + struct scatterlist *sg; + struct qseecom_send_modfd_cmd_req *req = NULL; + struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL; + struct qseecom_registered_listener_list *this_lstnr = NULL; + uint32_t offset; + struct sg_table *sg_ptr = NULL; + int ion_fd = -1; + struct dma_buf *dmabuf = NULL; + struct dma_buf_attachment *attach = NULL; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) + return -EFAULT; + + if (msg == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + if (data->type == QSEECOM_LISTENER_SERVICE) { + lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg; + this_lstnr = __qseecom_find_svc(data->listener.id); + if (IS_ERR_OR_NULL(this_lstnr)) { + pr_err("Invalid listener ID\n"); + return -ENOMEM; + } + } else { + req = (struct qseecom_send_modfd_cmd_req *)msg; + } + + for (i = 0; i < MAX_ION_FD; i++) { + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + ion_fd = req->ifd_data[i].fd; + field = (char *) req->cmd_req_buf + + req->ifd_data[i].cmd_buf_offset; + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + ion_fd = lstnr_resp->ifd_data[i].fd; + field = lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[i].cmd_buf_offset; + } else { + continue; + } + /* Populate the cmd data structure with the phys_addr */ + ret = 
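+ /*
+  * For each ion fd referenced by the modfd request, map the dma-buf
+  * and patch its dma address into the command buffer at
+  * cmd_buf_offset: a single-entry table is written as one 32-bit
+  * physical address, a multi-entry table as an array of
+  * qseecom_sg_entry records.  The cleanup pass zeroes the same
+  * fields and invalidates the buffer instead of cleaning it.
+  */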
qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf); + if (ret) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + if (sg_ptr->nents == 0) { + pr_err("Num of scattered entries is 0\n"); + goto err; + } + if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) { + pr_err("Num of scattered entries\n"); + pr_err(" (%d) is greater than max supported %d\n", + sg_ptr->nents, QSEECOM_MAX_SG_ENTRY); + goto err; + } + sg = sg_ptr->sgl; + if (sg_ptr->nents == 1) { + uint32_t *update; + + if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint32_t))) + goto err; + + if ((data->type == QSEECOM_CLIENT_APP && + (data->client.app_arch == ELFCLASS32 || + data->client.app_arch == ELFCLASS64)) || + (data->type == QSEECOM_LISTENER_SERVICE)) { + /* + * Check if sg list phy add region is under 4GB + */ + if ((qseecom.qsee_version >= QSEE_VERSION_40) && + (!cleanup) && + ((uint64_t)sg_dma_address(sg_ptr->sgl) + >= PHY_ADDR_4G - sg->length)) { + pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n", + data->client.app_name, + &(sg_dma_address(sg_ptr->sgl)), + sg->length); + goto err; + } + update = (uint32_t *) field; + *update = cleanup ? 0 : + (uint32_t)sg_dma_address(sg_ptr->sgl); + } else { + pr_err("QSEE app arch %u is not supported\n", + data->client.app_arch); + goto err; + } + len += (uint32_t)sg->length; + } else { + struct qseecom_sg_entry *update; + int j = 0; + + if (__boundary_checks_offset(req, lstnr_resp, data, i, + (SG_ENTRY_SZ * sg_ptr->nents))) + goto err; + + if ((data->type == QSEECOM_CLIENT_APP && + (data->client.app_arch == ELFCLASS32 || + data->client.app_arch == ELFCLASS64)) || + (data->type == QSEECOM_LISTENER_SERVICE)) { + update = (struct qseecom_sg_entry *)field; + for (j = 0; j < sg_ptr->nents; j++) { + /* + * Check if sg list PA is under 4GB + */ + if ((qseecom.qsee_version >= + QSEE_VERSION_40) && + (!cleanup) && + ((uint64_t)(sg_dma_address(sg)) + >= PHY_ADDR_4G - sg->length)) { + pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n", + data->client.app_name, + &(sg_dma_address(sg)), + sg->length); + goto err; + } + update->phys_addr = cleanup ? 0 : + (uint32_t)sg_dma_address(sg); + update->len = cleanup ? 0 : sg->length; + update++; + len += sg->length; + sg = sg_next(sg); + } + } else { + pr_err("QSEE app arch %u is not supported\n", + data->client.app_arch); + goto err; + } + } + + if (cleanup) { + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + if (data->type == QSEECOM_CLIENT_APP) { + offset = req->ifd_data[i].cmd_buf_offset; + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } else { + offset = (lstnr_resp->ifd_data[i].cmd_buf_offset + + (uintptr_t)lstnr_resp->resp_buf_ptr - + (uintptr_t)this_lstnr->sb_virt); + this_lstnr->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, offset); + this_lstnr->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? 
+ sg->length : sg_ptr->nents; + this_lstnr->sglist_cnt = i + 1; + } + } + /* Deallocate the kbuf */ + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + sg_ptr = NULL; + dmabuf = NULL; + attach = NULL; + } + return ret; +err: + if (!IS_ERR_OR_NULL(sg_ptr)) { + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + MAKE_NULL(sg_ptr, attach, dmabuf); + } + return -ENOMEM; +} + +static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data, + char *field, uint32_t fd_idx, struct sg_table *sg_ptr) +{ + struct scatterlist *sg = sg_ptr->sgl; + struct qseecom_sg_entry_64bit *sg_entry; + struct qseecom_sg_list_buf_hdr_64bit *buf_hdr; + void *buf; + uint i; + size_t size; + dma_addr_t coh_pmem; + + if (fd_idx >= MAX_ION_FD) { + pr_err("fd_idx [%d] is invalid\n", fd_idx); + return -ENOMEM; + } + buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field; + memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT); + /* Allocate a contiguous kernel buffer */ + size = sg_ptr->nents * SG_ENTRY_SZ_64BIT; + size = (size + PAGE_SIZE) & PAGE_MASK; + buf = dma_alloc_coherent(qseecom.dev, + size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + /* update qseecom_sg_list_buf_hdr_64bit */ + buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2; + buf_hdr->new_buf_phys_addr = coh_pmem; + buf_hdr->nents_total = sg_ptr->nents; + /* save the left sg entries into new allocated buf */ + sg_entry = (struct qseecom_sg_entry_64bit *)buf; + for (i = 0; i < sg_ptr->nents; i++) { + sg_entry->phys_addr = (uint64_t)sg_dma_address(sg); + sg_entry->len = sg->length; + sg_entry++; + sg = sg_next(sg); + } + + data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true; + data->client.sec_buf_fd[fd_idx].vbase = buf; + data->client.sec_buf_fd[fd_idx].pbase = coh_pmem; + data->client.sec_buf_fd[fd_idx].size = size; + + return 0; +} + +static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup, + struct qseecom_dev_handle *data) +{ + char *field; + int ret = 0; + int i = 0; + uint32_t len = 0; + struct scatterlist *sg; + struct qseecom_send_modfd_cmd_req *req = NULL; + struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL; + struct qseecom_registered_listener_list *this_lstnr = NULL; + uint32_t offset; + struct sg_table *sg_ptr; + int ion_fd = -1; + struct dma_buf *dmabuf = NULL; + struct dma_buf_attachment *attach = NULL; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) + return -EFAULT; + + if (msg == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + if (data->type == QSEECOM_LISTENER_SERVICE) { + lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg; + this_lstnr = __qseecom_find_svc(data->listener.id); + if (IS_ERR_OR_NULL(this_lstnr)) { + pr_err("Invalid listener ID\n"); + return -ENOMEM; + } + } else { + req = (struct qseecom_send_modfd_cmd_req *)msg; + } + + for (i = 0; i < MAX_ION_FD; i++) { + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + ion_fd = req->ifd_data[i].fd; + field = (char *) req->cmd_req_buf + + req->ifd_data[i].cmd_buf_offset; + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + ion_fd = lstnr_resp->ifd_data[i].fd; + field = lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[i].cmd_buf_offset; + } else { + continue; + } + /* Populate the cmd data structure with the phys_addr */ + ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf); + if (ret) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + if (sg_ptr->nents == 0) { + 
pr_err("Num of scattered entries is 0\n"); + goto err; + } + if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) { + pr_warn("Num of scattered entries\n"); + pr_warn(" (%d) is greater than %d\n", + sg_ptr->nents, QSEECOM_MAX_SG_ENTRY); + if (cleanup) { + if (data->client.sec_buf_fd[i].is_sec_buf_fd && + data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.dev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + } else { + ret = __qseecom_allocate_sg_list_buffer(data, + field, i, sg_ptr); + if (ret) { + pr_err("Failed to allocate sg list buffer\n"); + goto err; + } + } + len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT; + sg = sg_ptr->sgl; + goto cleanup; + } + sg = sg_ptr->sgl; + if (sg_ptr->nents == 1) { + uint64_t *update_64bit; + + if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint64_t))) + goto err; + + /* 64bit app uses 64bit address */ + update_64bit = (uint64_t *) field; + *update_64bit = cleanup ? 0 : + (uint64_t)sg_dma_address(sg_ptr->sgl); + len += (uint32_t)sg->length; + } else { + struct qseecom_sg_entry_64bit *update_64bit; + int j = 0; + + if (__boundary_checks_offset(req, lstnr_resp, data, i, + (SG_ENTRY_SZ_64BIT * sg_ptr->nents))) + goto err; + /* 64bit app uses 64bit address */ + update_64bit = (struct qseecom_sg_entry_64bit *)field; + for (j = 0; j < sg_ptr->nents; j++) { + update_64bit->phys_addr = cleanup ? 0 : + (uint64_t)sg_dma_address(sg); + update_64bit->len = cleanup ? 0 : + (uint32_t)sg->length; + update_64bit++; + len += sg->length; + sg = sg_next(sg); + } + } +cleanup: + if (cleanup) { + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + if (data->type == QSEECOM_CLIENT_APP) { + offset = req->ifd_data[i].cmd_buf_offset; + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 1, offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } else { + offset = (lstnr_resp->ifd_data[i].cmd_buf_offset + + (uintptr_t)lstnr_resp->resp_buf_ptr - + (uintptr_t)this_lstnr->sb_virt); + this_lstnr->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 1, offset); + this_lstnr->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? 
+ sg->length : sg_ptr->nents; + this_lstnr->sglist_cnt = i + 1; + } + } + /* unmap the dmabuf */ + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + sg_ptr = NULL; + dmabuf = NULL; + attach = NULL; + } + return ret; +err: + for (i = 0; i < MAX_ION_FD; i++) + if (data->client.sec_buf_fd[i].is_sec_buf_fd && + data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.dev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + if (!IS_ERR_OR_NULL(sg_ptr)) { + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + MAKE_NULL(sg_ptr, attach, dmabuf); + } + return -ENOMEM; +} + +static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp, + bool is_64bit_addr) +{ + int ret = 0; + int i; + struct qseecom_send_modfd_cmd_req req; + struct qseecom_send_cmd_req send_cmd_req; + void *origin_req_buf_kvirt, *origin_rsp_buf_kvirt; + phys_addr_t pa; + u8 *va = NULL; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + send_cmd_req.cmd_req_buf = req.cmd_req_buf; + send_cmd_req.cmd_req_len = req.cmd_req_len; + send_cmd_req.resp_buf = req.resp_buf; + send_cmd_req.resp_len = req.resp_len; + + if (__validate_send_cmd_inputs(data, &send_cmd_req)) + return -EINVAL; + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) { + pr_err("Invalid offset %d = 0x%x\n", + i, req.ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + + /*Back up original address */ + origin_req_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.cmd_req_buf); + origin_rsp_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.resp_buf); + + /* Allocate kernel buffer for request and response*/ + ret = __qseecom_alloc_coherent_buf(req.cmd_req_len + req.resp_len, + &va, &pa); + if (ret) { + pr_err("Failed to allocate coherent buf, ret %d\n", ret); + return ret; + } + + req.cmd_req_buf = va; + send_cmd_req.cmd_req_buf = (void *)pa; + + req.resp_buf = va + req.cmd_req_len; + send_cmd_req.resp_buf = (void *)pa + req.cmd_req_len; + + /* Copy the data to kernel request and response buffers*/ + memcpy(req.cmd_req_buf, origin_req_buf_kvirt, req.cmd_req_len); + memcpy(req.resp_buf, origin_rsp_buf_kvirt, req.resp_len); + + if (!is_64bit_addr) { + ret = __qseecom_update_cmd_buf(&req, false, data); + if (ret) + goto out; + ret = __qseecom_send_cmd(data, &send_cmd_req, true); + if (ret) + goto out; + ret = __qseecom_update_cmd_buf(&req, true, data); + if (ret) + goto out; + } else { + ret = __qseecom_update_cmd_buf_64(&req, false, data); + if (ret) + goto out; + ret = __qseecom_send_cmd(data, &send_cmd_req, true); + if (ret) + goto out; + ret = __qseecom_update_cmd_buf_64(&req, true, data); + if (ret) + goto out; + } + + /*Copy the response back to the userspace buffer*/ + memcpy(origin_rsp_buf_kvirt, req.resp_buf, req.resp_len); + memcpy(origin_req_buf_kvirt, req.cmd_req_buf, req.cmd_req_len); + +out: + if (req.cmd_req_buf) + __qseecom_free_coherent_buf(req.cmd_req_len + req.resp_len, + req.cmd_req_buf, (phys_addr_t)send_cmd_req.cmd_req_buf); + + return ret; +} + +static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_cmd(data, argp, false); +} + +static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_cmd(data, argp, true); +} + + + +static int __qseecom_listener_has_rcvd_req(struct 
qseecom_dev_handle *data, + struct qseecom_registered_listener_list *svc) +{ + int ret; + + ret = (svc->rcv_req_flag == 1); + return ret || data->abort; +} + +static int qseecom_receive_req(struct qseecom_dev_handle *data) +{ + int ret = 0; + struct qseecom_registered_listener_list *this_lstnr; + + mutex_lock(&listener_access_lock); + this_lstnr = __qseecom_find_svc(data->listener.id); + if (!this_lstnr) { + pr_err("Invalid listener ID\n"); + mutex_unlock(&listener_access_lock); + return -ENODATA; + } + mutex_unlock(&listener_access_lock); + + while (1) { + if (wait_event_interruptible(this_lstnr->rcv_req_wq, + __qseecom_listener_has_rcvd_req(data, + this_lstnr))) { + pr_debug("Interrupted: exiting Listener Service = %d\n", + (uint32_t)data->listener.id); + /* woken up for different reason */ + return -ERESTARTSYS; + } + + if (data->abort) { + pr_err("Aborting Listener Service = %d\n", + (uint32_t)data->listener.id); + return -ENODEV; + } + mutex_lock(&listener_access_lock); + this_lstnr->rcv_req_flag = 0; + mutex_unlock(&listener_access_lock); + break; + } + return ret; +} + +static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry) +{ + unsigned char app_arch = 0; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + + app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + + switch (app_arch) { + case ELFCLASS32: { + ehdr = (struct elf32_hdr *)fw_entry->data; + if (fw_entry->size < sizeof(*ehdr)) { + pr_err("%s: Not big enough to be an elf32 header\n", + qseecom.pdev->init_name); + return false; + } + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { + pr_err("%s: Not an elf32 header\n", + qseecom.pdev->init_name); + return false; + } + if (ehdr->e_phnum == 0) { + pr_err("%s: No loadable segments\n", + qseecom.pdev->init_name); + return false; + } + if (sizeof(struct elf32_phdr) * ehdr->e_phnum + + sizeof(struct elf32_hdr) > fw_entry->size) { + pr_err("%s: Program headers not within mdt\n", + qseecom.pdev->init_name); + return false; + } + break; + } + case ELFCLASS64: { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + if (fw_entry->size < sizeof(*ehdr64)) { + pr_err("%s: Not big enough to be an elf64 header\n", + qseecom.pdev->init_name); + return false; + } + if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) { + pr_err("%s: Not an elf64 header\n", + qseecom.pdev->init_name); + return false; + } + if (ehdr64->e_phnum == 0) { + pr_err("%s: No loadable segments\n", + qseecom.pdev->init_name); + return false; + } + if (sizeof(struct elf64_phdr) * ehdr64->e_phnum + + sizeof(struct elf64_hdr) > fw_entry->size) { + pr_err("%s: Program headers not within mdt\n", + qseecom.pdev->init_name); + return false; + } + break; + } + default: { + pr_err("QSEE app arch %u is not supported\n", app_arch); + return false; + } + } + return true; +} + +static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size, + uint32_t *app_arch) +{ + int ret = -1; + int i = 0, rc = 0; + const struct firmware *fw_entry = NULL; + char fw_name[MAX_APP_NAME_SIZE]; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + int num_images = 0; + + snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname); + rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev); + if (rc) { + pr_err("error with firmware_request_nowarn, rc = %d\n", rc); + ret = -EIO; + goto err; + } + if (!__qseecom_is_fw_image_valid(fw_entry)) { + ret = -EIO; + goto err; + } + *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + *fw_size = fw_entry->size; + if (*app_arch == ELFCLASS32) { + ehdr = (struct elf32_hdr 
*)fw_entry->data; + num_images = ehdr->e_phnum; + } else if (*app_arch == ELFCLASS64) { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + num_images = ehdr64->e_phnum; + } else { + pr_err("QSEE %s app, arch %u is not supported\n", + appname, *app_arch); + ret = -EIO; + goto err; + } + pr_debug("QSEE %s app, arch %u\n", appname, *app_arch); + release_firmware(fw_entry); + fw_entry = NULL; + for (i = 0; i < num_images; i++) { + memset(fw_name, 0, sizeof(fw_name)); + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i); + ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev); + if (ret) + goto err; + if (*fw_size > U32_MAX - fw_entry->size) { + pr_err("QSEE %s app file size overflow\n", appname); + ret = -EINVAL; + goto err; + } + *fw_size += fw_entry->size; + release_firmware(fw_entry); + fw_entry = NULL; + } + + return ret; +err: + if (fw_entry) + release_firmware(fw_entry); + *fw_size = 0; + return ret; +} + +static int __qseecom_get_fw_data(const char *appname, u8 *img_data, + uint32_t fw_size, + struct qseecom_load_app_ireq *load_req) +{ + int ret = -1; + int i = 0, rc = 0; + const struct firmware *fw_entry = NULL; + char fw_name[MAX_APP_NAME_SIZE]; + u8 *img_data_ptr = img_data; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + int num_images = 0; + unsigned char app_arch = 0; + + snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname); + rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev); + if (rc) { + ret = -EIO; + goto err; + } + + load_req->img_len = fw_entry->size; + if (load_req->img_len > fw_size) { + pr_err("app %s size %zu is larger than buf size %u\n", + appname, fw_entry->size, fw_size); + ret = -EINVAL; + goto err; + } + memcpy(img_data_ptr, fw_entry->data, fw_entry->size); + img_data_ptr = img_data_ptr + fw_entry->size; + load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/ + + app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + if (app_arch == ELFCLASS32) { + ehdr = (struct elf32_hdr *)fw_entry->data; + num_images = ehdr->e_phnum; + } else if (app_arch == ELFCLASS64) { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + num_images = ehdr64->e_phnum; + } else { + pr_err("QSEE %s app, arch %u is not supported\n", + appname, app_arch); + ret = -EIO; + goto err; + } + release_firmware(fw_entry); + fw_entry = NULL; + for (i = 0; i < num_images; i++) { + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i); + ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev); + if (ret) { + pr_err("Failed to locate blob %s\n", fw_name); + goto err; + } + if ((fw_entry->size > U32_MAX - load_req->img_len) || + (fw_entry->size + load_req->img_len > fw_size)) { + pr_err("Invalid file size for %s\n", fw_name); + ret = -EINVAL; + goto err; + } + memcpy(img_data_ptr, fw_entry->data, fw_entry->size); + img_data_ptr = img_data_ptr + fw_entry->size; + load_req->img_len += fw_entry->size; + release_firmware(fw_entry); + fw_entry = NULL; + } + return ret; +err: + release_firmware(fw_entry); + return ret; +} + +static int __qseecom_alloc_coherent_buf( + uint32_t size, u8 **vaddr, phys_addr_t *paddr) +{ + dma_addr_t coh_pmem; + void *buf = NULL; + + /* Allocate a contiguous kernel buffer */ + size = (size + PAGE_SIZE) & PAGE_MASK; + buf = dma_alloc_coherent(qseecom.dev, + size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + *vaddr = buf; + *paddr = coh_pmem; + return 0; +} + +static void __qseecom_free_coherent_buf(uint32_t size, + u8 *vaddr, phys_addr_t paddr) +{ + if (!vaddr) + return; + size = (size + PAGE_SIZE) & PAGE_MASK; 
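+	/* Free with the same page-rounded size computed in __qseecom_alloc_coherent_buf() */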
+ dma_free_coherent(qseecom.dev, size, vaddr, paddr); +} + +static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname, + uint32_t *app_id) +{ + int ret = -1; + uint32_t fw_size = 0; + struct qseecom_load_app_ireq load_req = {0, 0, 0, 0}; + struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0}; + struct qseecom_command_scm_resp resp; + u8 *img_data = NULL; + phys_addr_t pa = 0; + void *cmd_buf = NULL; + size_t cmd_len; + uint32_t app_arch = 0; + + if (!data || !appname || !app_id) { + pr_err("Null pointer to data or appname or appid\n"); + return -EINVAL; + } + *app_id = 0; + if (__qseecom_get_fw_size(appname, &fw_size, &app_arch)) + return -EIO; + data->client.app_arch = app_arch; + + /* Check and load cmnlib */ + if (qseecom.qsee_version > QSEEE_VERSION_00) { + if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) { + ret = qseecom_load_commonlib_image(data, "cmnlib"); + if (ret) { + pr_err("failed to load cmnlib\n"); + return -EIO; + } + qseecom.commonlib_loaded = true; + pr_debug("cmnlib is loaded\n"); + } + + if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) { + ret = qseecom_load_commonlib_image(data, "cmnlib64"); + if (ret) { + pr_err("failed to load cmnlib64\n"); + return -EIO; + } + qseecom.commonlib64_loaded = true; + pr_debug("cmnlib64 is loaded\n"); + } + } + + ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa); + if (ret) + return ret; + + ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + + /* Populate the load_req parameters */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req.mdt_len = load_req.mdt_len; + load_req.img_len = load_req.img_len; + strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE); + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req_64bit.mdt_len = load_req.mdt_len; + load_req_64bit.img_len = load_req.img_len; + strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE); + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + } + + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_unregister_bus_bw_need; + } + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", ret); + ret = -EIO; + goto exit_disable_clk_vote; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + *app_id = resp.data; + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("incomp_cmd err %d, %d, unload %d %s\n", + ret, resp.result, resp.data, appname); + __qseecom_unload_app(data, resp.data); + ret = -EFAULT; + } else { + *app_id = resp.data; + } + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed with response QSEOS_RESULT FAILURE\n"); + break; + default: + pr_err("scm call return unknown response %d\n", resp.result); + ret = -EINVAL; + break; + } + +exit_disable_clk_vote: + __qseecom_disable_clk_scale_down(data); + 
+exit_unregister_bus_bw_need: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + +exit_free_img_data: + if (img_data) + __qseecom_free_coherent_buf(fw_size, img_data, pa); + return ret; +} + +static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data, + char *cmnlib_name) +{ + int ret = 0; + uint32_t fw_size = 0; + struct qseecom_load_app_ireq load_req = {0, 0, 0, 0}; + struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0}; + struct qseecom_command_scm_resp resp; + u8 *img_data = NULL; + phys_addr_t pa = 0; + void *cmd_buf = NULL; + size_t cmd_len; + uint32_t app_arch = 0; + + if (!cmnlib_name) { + pr_err("cmnlib_name is NULL\n"); + return -EINVAL; + } + if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) { + pr_err("The cmnlib_name (%s) with length %zu is not valid\n", + cmnlib_name, strlen(cmnlib_name)); + return -EINVAL; + } + + if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch)) + return -EIO; + + ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa); + if (ret) + return -EIO; + + ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.phy_addr = (uint32_t)pa; + load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_lib_image_ireq); + } else { + load_req_64bit.phy_addr = (uint64_t)pa; + load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND; + load_req_64bit.img_len = load_req.img_len; + load_req_64bit.mdt_len = load_req.mdt_len; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_unregister_bus_bw_need; + } + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", ret); + ret = -EIO; + goto exit_disable_clk_vote; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed w/response result%d\n", resp.result); + ret = -EINVAL; + goto exit_disable_clk_vote; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", ret); + goto exit_disable_clk_vote; + } + break; + default: + pr_err("scm call return unknown response %d\n", resp.result); + ret = -EINVAL; + goto exit_disable_clk_vote; + } + +exit_disable_clk_vote: + __qseecom_disable_clk_scale_down(data); + +exit_unregister_bus_bw_need: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + +exit_free_img_data: + if (img_data) + __qseecom_free_coherent_buf(fw_size, img_data, pa); + return ret; +} + +static int qseecom_unload_commonlib_image(void) +{ + int ret = -EINVAL; + struct qseecom_unload_lib_image_ireq unload_req = {0}; + struct qseecom_command_scm_resp resp; + + /* Populate the remaining parameters */ + unload_req.qsee_cmd_id = 
QSEOS_UNLOAD_SERV_IMAGE_COMMAND; + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req, + sizeof(struct qseecom_unload_lib_image_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload lib failed : ret %d\n", ret); + ret = -EIO; + } else { + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n"); + break; + default: + pr_err("scm call return unknown response %d\n", + resp.result); + ret = -EINVAL; + break; + } + } + + return ret; +} + +int qseecom_start_app(struct qseecom_handle **handle, + char *app_name, uint32_t size) +{ + int32_t ret = 0; + unsigned long flags = 0; + struct qseecom_dev_handle *data = NULL; + struct qseecom_check_app_ireq app_ireq; + struct qseecom_registered_app_list *entry = NULL; + struct qseecom_registered_kclient_list *kclient_entry = NULL; + bool found_app = false; + phys_addr_t pa = 0; + u8 *va = NULL; + uint32_t fw_size, app_arch; + uint32_t app_id = 0; + + __wakeup_unregister_listener_kthread(); + __wakeup_unload_app_kthread(); + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + if (!app_name) { + pr_err("failed to get the app name\n"); + return -EINVAL; + } + + if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) { + pr_err("The app_name (%s) with length %zu is not valid\n", + app_name, strnlen(app_name, MAX_APP_NAME_SIZE)); + return -EINVAL; + } + + *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL); + if (!(*handle)) + return -ENOMEM; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + kfree(*handle); + *handle = NULL; + return -ENOMEM; + } + + mutex_lock(&app_access_lock); + + data->abort = 0; + data->type = QSEECOM_CLIENT_APP; + data->released = false; + data->client.sb_length = size; + data->client.user_virt_sb_base = 0; + data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf( + sizeof(struct sglist_info) * MAX_ION_FD, + &data->sglistinfo_shm.paddr, + &data->sglistinfo_shm); + if (!data->sglistinfo_ptr) { + ret = -ENOMEM; + goto err; + } + + init_waitqueue_head(&data->abort_wq); + + app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE); + ret = __qseecom_check_app_exists(app_ireq, &app_id); + if (ret) + goto err; + + strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE); + if (app_id) { + pr_warn("App id %d for [%s] app exists\n", app_id, + (char *)app_ireq.app_name); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + if (entry->ref_cnt == U32_MAX) { + pr_err("App %d (%s) ref_cnt overflow\n", + app_id, app_ireq.app_name); + ret = -EINVAL; + goto err; + } + entry->ref_cnt++; + found_app = true; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + if (!found_app) + pr_warn("App_id %d [%s] was loaded but not registered\n", + ret, (char *)app_ireq.app_name); + } else { + /* load the app and get the app_id */ + pr_debug("%s: Loading app for the first time'\n", + qseecom.pdev->init_name); + ret = __qseecom_load_fw(data, app_name, &app_id); + if (ret < 0) + goto err; + } + data->client.app_id = app_id; + if (!found_app) { + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto err; + } + 
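+		/* Populate the new entry and add this app to the registered app list */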
entry->app_id = app_id; + entry->ref_cnt = 1; + strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE); + if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) { + ret = -EIO; + kfree(entry); + goto err; + } + entry->app_arch = app_arch; + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + entry->check_block = 0; + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_add_tail(&entry->list, &qseecom.registered_app_list_head); + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + } + + /* Get the physical address of the req/resp buffer */ + ret = __qseecom_alloc_coherent_buf(size, &va, &pa); + if (ret) { + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + goto err; + } + + /* Populate the structure for sending scm call to load image */ + data->client.sb_virt = va; + data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt; + data->client.sb_phys = (phys_addr_t)pa; + (*handle)->dev = (void *)data; + (*handle)->sbuf = (unsigned char *)data->client.sb_virt; + (*handle)->sbuf_len = data->client.sb_length; + + kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL); + if (!kclient_entry) { + ret = -ENOMEM; + goto err; + } + kclient_entry->handle = *handle; + + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + list_add_tail(&kclient_entry->list, + &qseecom.registered_kclient_list_head); + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + + mutex_unlock(&app_access_lock); + __wakeup_unload_app_kthread(); + return 0; + +err: + __qseecom_free_coherent_buf(size, va, pa); + __qseecom_free_tzbuf(&data->sglistinfo_shm); + kfree(data); + kfree(*handle); + *handle = NULL; + mutex_unlock(&app_access_lock); + __wakeup_unload_app_kthread(); + return ret; +} +EXPORT_SYMBOL(qseecom_start_app); + +int qseecom_shutdown_app(struct qseecom_handle **handle) +{ + int ret = -EINVAL; + struct qseecom_dev_handle *data; + + struct qseecom_registered_kclient_list *kclient = NULL; + unsigned long flags = 0; + bool found_handle = false; + + __wakeup_unregister_listener_kthread(); + __wakeup_unload_app_kthread(); + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + + if ((handle == NULL) || (*handle == NULL)) { + pr_err("Handle is not initialized\n"); + return -EINVAL; + } + data = (struct qseecom_dev_handle *) ((*handle)->dev); + mutex_lock(&app_access_lock); + + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + list_for_each_entry(kclient, &qseecom.registered_kclient_list_head, + list) { + if (kclient->handle == (*handle)) { + list_del(&kclient->list); + found_handle = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + if (!found_handle) + pr_err("Unable to find the handle, exiting\n"); + else + ret = qseecom_unload_app(data, false); + + mutex_unlock(&app_access_lock); + if (ret == 0) { + if (data->client.sb_virt) + __qseecom_free_coherent_buf(data->client.sb_length, + data->client.sb_virt, data->client.sb_phys); + __qseecom_free_tzbuf(&data->sglistinfo_shm); + kfree_sensitive(data); + kfree_sensitive(*handle); + kfree_sensitive(kclient); + *handle = NULL; + } + __wakeup_unload_app_kthread(); + return ret; +} +EXPORT_SYMBOL(qseecom_shutdown_app); + +int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, + uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len) +{ + int ret = 0; + struct 
qseecom_send_cmd_req req = {NULL, 0, NULL, 0}; + struct qseecom_dev_handle *data; + bool perf_enabled = false; + + __wakeup_unregister_listener_kthread(); + __wakeup_unload_app_kthread(); + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + + if (handle == NULL) { + pr_err("Handle is not initialized\n"); + return -EINVAL; + } + data = handle->dev; + + req.cmd_req_len = sbuf_len; + req.resp_len = rbuf_len; + req.cmd_req_buf = send_buf; + req.resp_buf = resp_buf; + + if (__validate_send_cmd_inputs(data, &req)) + return -EINVAL; + + mutex_lock(&app_access_lock); + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(INACTIVE); + if (ret) { + pr_err("Failed to set bw.\n"); + mutex_unlock(&app_access_lock); + return ret; + } + } + /* + * On targets where crypto clock is handled by HLOS, + * if clk_access_cnt is zero and perf_enabled is false, + * then the crypto clock was not enabled before sending cmd + * to tz, qseecom will enable the clock to avoid service failure. + */ + if (!qseecom.no_clock_support && + !qseecom.qsee.clk_access_cnt && !data->perf_enabled) { + pr_debug("ce clock is not enabled!\n"); + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clock with err %d\n", + ret); + mutex_unlock(&app_access_lock); + return -EINVAL; + } + perf_enabled = true; + } + if (!strcmp(data->client.app_name, "securemm")) + data->use_legacy_cmd = true; + + ret = __qseecom_send_cmd(data, &req, false); + + data->use_legacy_cmd = false; + if (qseecom.support_bus_scaling) + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + + if (perf_enabled) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + + mutex_unlock(&app_access_lock); + + if (ret) + return ret; + + pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n", + req.resp_len, req.resp_buf); + return ret; +} +EXPORT_SYMBOL(qseecom_send_command); + +int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high) +{ + int ret = 0; + + if ((handle == NULL) || (handle->dev == NULL)) { + pr_err("No valid kernel client\n"); + return -EINVAL; + } + if (high) { + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs(handle->dev, + HIGH); + mutex_unlock(&qsee_bw_mutex); + } else { + ret = qseecom_perf_enable(handle->dev); + if (ret) + pr_err("Failed to vote for clock with err %d\n", + ret); + } + } else { + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(handle->dev, CLK_DFAB); + qsee_disable_clock_vote(handle->dev, CLK_SFPB); + } else { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(handle->dev); + mutex_unlock(&qsee_bw_mutex); + } + } + return ret; +} +EXPORT_SYMBOL(qseecom_set_bandwidth); + +int qseecom_process_listener_from_smcinvoke(uint32_t *result, + u64 *response_type, unsigned int *data) +{ + struct qseecom_registered_app_list dummy_app_entry; + struct qseecom_dev_handle dummy_private_data = {0}; + struct qseecom_command_scm_resp resp; + int ret = 0; + + if (!result || !response_type || !data) { + pr_err("input parameter NULL\n"); + return -EINVAL; + } + + memset((void *)&dummy_app_entry, 0, sizeof(dummy_app_entry)); + /* + * smcinvoke expects result in scm call resp.ret[1] and type in ret[0], + * while qseecom expects result in ret[0] and type in ret[1]. 
+ * To simplify API interface and code changes in smcinvoke, here + * internally switch result and resp_type to let qseecom work with + * smcinvoke and upstream scm driver protocol. + */ + resp.result = *response_type; + resp.resp_type = *result; + resp.data = *data; + + dummy_private_data.client.app_id = *response_type; + dummy_private_data.client.from_smcinvoke = true; + dummy_app_entry.app_id = *response_type; + + mutex_lock(&app_access_lock); + if (qseecom.qsee_reentrancy_support) + ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry, + &dummy_private_data); + else + ret = __qseecom_process_incomplete_cmd(&dummy_private_data, + &resp); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n", + resp.result, resp.data, resp.resp_type, ret); + *result = resp.resp_type; + *response_type = resp.result; + *data = resp.data; + return ret; +} +EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke); + +static int qseecom_send_resp(void) +{ + qseecom.send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data) +{ + struct qseecom_registered_listener_list *this_lstnr = NULL; + + pr_debug("lstnr %d send resp, wakeup\n", data->listener.id); + this_lstnr = __qseecom_find_svc(data->listener.id); + if (this_lstnr == NULL) + return -EINVAL; + qseecom.send_resp_flag = 1; + this_lstnr->send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_modfd_listener_resp *resp, + struct qseecom_registered_listener_list *this_lstnr) +{ + int i; + + if (!data || !resp || !this_lstnr) { + pr_err("listener handle or resp msg is null\n"); + return -EINVAL; + } + + if (resp->resp_buf_ptr == NULL) { + pr_err("resp buffer is null\n"); + return -EINVAL; + } + /* validate resp buf length */ + if ((resp->resp_len == 0) || + (resp->resp_len > this_lstnr->sb_length)) { + pr_err("resp buf length %d not valid\n", resp->resp_len); + return -EINVAL; + } + + if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if ((uintptr_t)this_lstnr->user_virt_sb_base > + (ULONG_MAX - this_lstnr->sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + /* validate resp buf */ + if (((uintptr_t)resp->resp_buf_ptr < + (uintptr_t)this_lstnr->user_virt_sb_base) || + ((uintptr_t)resp->resp_buf_ptr >= + ((uintptr_t)this_lstnr->user_virt_sb_base + + this_lstnr->sb_length)) || + (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) > + ((uintptr_t)this_lstnr->user_virt_sb_base + + this_lstnr->sb_length))) { + pr_err("resp buf is out of shared buffer region\n"); + return -EINVAL; + } + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) { + pr_err("Invalid offset %d = 0x%x\n", + i, resp->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + + return 0; +} + +static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data, + void __user *argp, bool is_64bit_addr) +{ + struct qseecom_send_modfd_listener_resp resp; + struct qseecom_registered_listener_list *this_lstnr = NULL; + + if (copy_from_user(&resp, argp, sizeof(resp))) { + pr_err("copy_from_user failed\n"); + return -EINVAL; + } + + this_lstnr = __qseecom_find_svc(data->listener.id); + if (this_lstnr == NULL) + 
return -EINVAL; + + if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr)) + return -EINVAL; + + resp.resp_buf_ptr = this_lstnr->sb_virt + + (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base); + + if (!is_64bit_addr) + __qseecom_update_cmd_buf(&resp, false, data); + else + __qseecom_update_cmd_buf_64(&resp, false, data); + qseecom.send_resp_flag = 1; + this_lstnr->send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_resp(data, argp, false); +} + +static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_resp(data, argp, true); +} + +static int qseecom_get_qseos_version(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qseos_version_req req; + + if (copy_from_user(&req, argp, sizeof(req))) { + pr_err("copy_from_user failed\n"); + return -EINVAL; + } + req.qseos_version = qseecom.qseos_version; + if (copy_to_user(argp, &req, sizeof(req))) { + pr_err("copy_to_user failed\n"); + return -EINVAL; + } + return 0; +} + +static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce) +{ + int rc = 0; + struct qseecom_clk *qclk = NULL; + + if (qseecom.no_clock_support) + return 0; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + if (ce == CLK_CE_DRV) + qclk = &qseecom.ce_drv; + + if (qclk == NULL) { + pr_err("CLK type not supported\n"); + return -EINVAL; + } + mutex_lock(&clk_access_lock); + + if (qclk->clk_access_cnt == ULONG_MAX) { + pr_err("clk_access_cnt beyond limitation\n"); + goto err; + } + if (qclk->clk_access_cnt > 0) { + qclk->clk_access_cnt++; + mutex_unlock(&clk_access_lock); + return rc; + } + + /* Enable CE core clk */ + if (qclk->ce_core_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_core_clk); + if (rc) { + pr_err("Unable to enable/prepare CE core clk\n"); + goto err; + } + } + /* Enable CE clk */ + if (qclk->ce_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_clk); + if (rc) { + pr_err("Unable to enable/prepare CE iface clk\n"); + goto ce_clk_err; + } + } + /* Enable AXI clk */ + if (qclk->ce_bus_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_bus_clk); + if (rc) { + pr_err("Unable to enable/prepare CE bus clk\n"); + goto ce_bus_clk_err; + } + } + qclk->clk_access_cnt++; + mutex_unlock(&clk_access_lock); + return 0; + +ce_bus_clk_err: + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); +ce_clk_err: + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); +err: + mutex_unlock(&clk_access_lock); + return -EIO; +} + +static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + + if (qseecom.no_clock_support) + return; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + mutex_lock(&clk_access_lock); + + if (qclk->clk_access_cnt == 0) { + mutex_unlock(&clk_access_lock); + return; + } + + if (qclk->clk_access_cnt == 1) { + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); + if (qclk->ce_bus_clk != NULL) + clk_disable_unprepare(qclk->ce_bus_clk); + } + qclk->clk_access_cnt--; + mutex_unlock(&clk_access_lock); +} + +static int qsee_vote_for_clock(struct qseecom_dev_handle *data, + int32_t clk_type) +{ + int ret = 0; + struct qseecom_clk *qclk; + + if (qseecom.no_clock_support) + return 0; + + qclk = &qseecom.qsee; + if 
(!qseecom.qsee_perf_client) + return ret; + + switch (clk_type) { + case CLK_DFAB: + mutex_lock(&qsee_bw_mutex); + if (!qseecom.qsee_bw_count) { + if (qseecom.qsee_sfpb_bw_count > 0) + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 3); + else { + if (qclk->ce_core_src_clk != NULL) + ret = __qseecom_enable_clk(CLK_QSEE); + if (!ret) { + ret = + qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 1); + if ((ret) && + (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + } + if (ret) + pr_err("DFAB Bandwidth req failed (%d)\n", + ret); + else { + qseecom.qsee_bw_count++; + data->perf_enabled = true; + } + } else { + qseecom.qsee_bw_count++; + data->perf_enabled = true; + } + mutex_unlock(&qsee_bw_mutex); + break; + case CLK_SFPB: + mutex_lock(&qsee_bw_mutex); + if (!qseecom.qsee_sfpb_bw_count) { + if (qseecom.qsee_bw_count > 0) + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 3); + else { + if (qclk->ce_core_src_clk != NULL) + ret = __qseecom_enable_clk(CLK_QSEE); + if (!ret) { + ret = + qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 2); + if ((ret) && + (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + } + + if (ret) + pr_err("SFPB Bandwidth req failed (%d)\n", + ret); + else { + qseecom.qsee_sfpb_bw_count++; + data->fast_load_enabled = true; + } + } else { + qseecom.qsee_sfpb_bw_count++; + data->fast_load_enabled = true; + } + mutex_unlock(&qsee_bw_mutex); + break; + default: + pr_err("Clock type not defined\n"); + break; + } + return ret; +} + +static void qsee_disable_clock_vote(struct qseecom_dev_handle *data, + int32_t clk_type) +{ + int32_t ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + + if (qseecom.no_clock_support) + return; + if (!qseecom.qsee_perf_client) + return; + + switch (clk_type) { + case CLK_DFAB: + mutex_lock(&qsee_bw_mutex); + if (qseecom.qsee_bw_count == 0) { + pr_err("Client error.Extra call to disable DFAB clk\n"); + mutex_unlock(&qsee_bw_mutex); + return; + } + + if (qseecom.qsee_bw_count == 1) { + if (qseecom.qsee_sfpb_bw_count > 0) + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 2); + else { + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 0); + if ((!ret) && (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + if (ret) + pr_err("SFPB Bandwidth req fail (%d)\n", + ret); + else { + qseecom.qsee_bw_count--; + data->perf_enabled = false; + } + } else { + qseecom.qsee_bw_count--; + data->perf_enabled = false; + } + mutex_unlock(&qsee_bw_mutex); + break; + case CLK_SFPB: + mutex_lock(&qsee_bw_mutex); + if (qseecom.qsee_sfpb_bw_count == 0) { + pr_err("Client error.Extra call to disable SFPB clk\n"); + mutex_unlock(&qsee_bw_mutex); + return; + } + if (qseecom.qsee_sfpb_bw_count == 1) { + if (qseecom.qsee_bw_count > 0) + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 1); + else { + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, 0); + if ((!ret) && (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + if (ret) + pr_err("SFPB Bandwidth req fail (%d)\n", + ret); + else { + qseecom.qsee_sfpb_bw_count--; + data->fast_load_enabled = false; + } + } else { + qseecom.qsee_sfpb_bw_count--; + data->fast_load_enabled = false; + } + mutex_unlock(&qsee_bw_mutex); + break; + default: + pr_err("Clock type not defined\n"); + break; + } + +} + +static int qseecom_load_external_elf(struct qseecom_dev_handle *data, + void __user *argp) +{ + 
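+	/*
+	 * Map the caller's ion fd, build a 32-bit or 64-bit load request depending
+	 * on the QSEE version, and issue the SCM call to load the external ELF image.
+	 */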
struct qseecom_load_img_req load_img_req; + int uret = 0; + int ret = 0; + phys_addr_t pa = 0; + size_t len; + struct qseecom_load_app_ireq load_req; + struct qseecom_load_app_64bit_ireq load_req_64bit; + struct qseecom_command_scm_resp resp; + void *cmd_buf = NULL; + size_t cmd_len; + struct sg_table *sgt = NULL; + struct dma_buf_attachment *attach = NULL; + struct dma_buf *dmabuf = NULL; + void *va = NULL; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&load_img_req, + (void __user *)argp, + sizeof(struct qseecom_load_img_req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + /* Get the handle of the shared fd */ + ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, &pa, &va, + &sgt, &attach, &len, &dmabuf); + if (ret) { + pr_err("Failed to map vaddr for ion_fd %d\n", + load_img_req.ifd_data_fd); + return -ENOMEM; + } + if (load_img_req.mdt_len > len || load_img_req.img_len > len) { + pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n", + len, load_img_req.mdt_len, + load_img_req.img_len); + ret = -EINVAL; + goto exit_cpu_restore; + } + + /* Populate the structure for sending scm call to load image */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND; + load_req.mdt_len = load_img_req.mdt_len; + load_req.img_len = load_img_req.img_len; + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND; + load_req_64bit.mdt_len = load_img_req.mdt_len; + load_req_64bit.img_len = load_img_req.img_len; + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_cpu_restore; + } + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_register_bus_bandwidth_needs; + } + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit_disable_clock; + } + /* SCM_CALL to load the external elf */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", + ret); + ret = -EFAULT; + goto exit_disable_clock; + } + + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit_disable_clock; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + pr_err("%s: qseos result incomplete\n", __func__); + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("process_incomplete_cmd failed: err: %d\n", ret); + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n"); + ret = -EFAULT; + break; + default: + pr_err("scm_call response result %d not supported\n", + resp.result); + ret = -EFAULT; + break; + } + +exit_disable_clock: + __qseecom_disable_clk_scale_down(data); + +exit_register_bus_bandwidth_needs: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + uret = qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + if (uret) + 
pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n", + uret, ret); + } + +exit_cpu_restore: + if (dmabuf) { + qseecom_vaddr_unmap(va, sgt, attach, dmabuf); + MAKE_NULL(sgt, attach, dmabuf); + } + return ret; +} + +static int qseecom_unload_external_elf(struct qseecom_dev_handle *data) +{ + int ret = 0; + struct qseecom_command_scm_resp resp; + struct qseecom_unload_app_ireq req; + + /* unavailable client app */ + data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; + + /* Populate the structure for sending scm call to unload image */ + req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND; + + /* SCM_CALL to unload the external elf */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_unload_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload failed : ret %d\n", + ret); + ret = -EFAULT; + goto qseecom_unload_external_elf_scm_err; + } + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("process_incomplete_cmd fail err: %d\n", + ret); + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("scm_call to unload image failed resp.result =%d\n", + resp.result); + ret = -EFAULT; + } + } + +qseecom_unload_external_elf_scm_err: + return ret; +} + +static int qseecom_query_app_loaded(struct qseecom_dev_handle *data, + void __user *argp) +{ + int32_t ret = 0; + struct qseecom_qseos_app_load_query query_req = { {0} }; + struct qseecom_check_app_ireq req; + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + uint32_t app_arch = 0, app_id = 0; + bool found_app = false; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&query_req, (void __user *)argp, + sizeof(struct qseecom_qseos_app_load_query))) { + pr_err("copy_from_user failed\n"); + ret = -EFAULT; + goto exit_free; + } + + req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0'; + strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE); + + ret = __qseecom_check_app_exists(req, &app_id); + if (ret) { + pr_err(" scm call to check if app is loaded failed\n"); + goto exit_free; + } + if (app_id) { + pr_debug("App id %d (%s) already exists\n", app_id, + (char *)(req.app_name)); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + app_arch = entry->app_arch; + if (entry->ref_cnt == U32_MAX) { + pr_err("App %d (%s) ref_cnt overflow\n", + app_id, req.app_name); + ret = -EINVAL; + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, + flags); + goto exit_free; + } + entry->ref_cnt++; + found_app = true; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + data->client.app_id = app_id; + query_req.app_id = app_id; + if (app_arch) { + data->client.app_arch = app_arch; + query_req.app_arch = app_arch; + } else { + data->client.app_arch = 0; + query_req.app_arch = 0; + } + strlcpy(data->client.app_name, query_req.app_name, + MAX_APP_NAME_SIZE); + /* + * If app was loaded by appsbl before and was not registered, + * regiser this app now. 
+ */ + if (!found_app) { + pr_debug("Register app %d [%s] which was loaded before\n", + ret, (char *)query_req.app_name); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + ret = -ENOMEM; + goto exit_free; + } + entry->app_id = app_id; + entry->ref_cnt = 1; + entry->app_arch = data->client.app_arch; + strlcpy(entry->app_name, data->client.app_name, + MAX_APP_NAME_SIZE); + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + entry->check_block = 0; + spin_lock_irqsave(&qseecom.registered_app_list_lock, + flags); + list_add_tail(&entry->list, + &qseecom.registered_app_list_head); + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + } + if (copy_to_user(argp, &query_req, sizeof(query_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + goto exit_free; + } + ret = -EEXIST; /* app already loaded */ + goto exit_free; + } + +exit_free: + return ret; /* app not loaded */ +} + +static int __qseecom_get_ce_pipe_info( + enum qseecom_key_management_usage_type usage, + uint32_t *pipe, uint32_t **ce_hw, uint32_t unit) +{ + int ret = -EINVAL; + int i, j; + struct qseecom_ce_info_use *p = NULL; + int total = 0; + struct qseecom_ce_pipe_entry *pcepipe; + + switch (usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", usage); + return -EINVAL; + } + + for (j = 0; j < total; j++) { + if (p->unit_num == unit) { + pcepipe = p->ce_pipe_entry; + for (i = 0; i < p->num_ce_pipe_entries; i++) { + (*ce_hw)[i] = pcepipe->ce_num; + *pipe = pcepipe->ce_pipe_pair; + pcepipe++; + } + ret = 0; + break; + } + p++; + } + return ret; +} + +static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_generate_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_generate_ireq), + &resp, sizeof(resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) { + pr_debug("Key ID exists.\n"); + ret = 0; + } else { + pr_err("scm call to generate key failed : %d\n", ret); + ret = -EFAULT; + } + goto generate_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_FAIL_KEY_ID_EXISTS: + pr_debug("Key ID exists.\n"); + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) { + pr_debug("Key ID exists.\n"); + ret = 0; + } else { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + } + } + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("gen key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } +generate_key_exit: + 
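+	/* Balance the CLK_QSEE vote taken at the start of this function */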
__qseecom_disable_clk(CLK_QSEE); + return ret; +} + +static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_delete_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_delete_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } else { + pr_err("scm call to delete key failed : %d\n", ret); + ret = -EFAULT; + } + goto del_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } + } + break; + case QSEOS_RESULT_FAIL_MAX_ATTEMPT: + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Delete key scm call failed resp.result %d\n", + resp.result); + ret = -EINVAL; + break; + } +del_key_exit: + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + +static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_select_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + if (qseecom.qsee.instance != qseecom.ce_drv.instance) { + ret = __qseecom_enable_clk(CLK_CE_DRV); + if (ret) + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_select_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } else if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } else { + pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n", + ret); + ret = -EFAULT; + } + goto set_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + if (resp.result == + QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } + if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } + } + break; + case QSEOS_RESULT_FAIL_MAX_ATTEMPT: + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + break; + case QSEOS_RESULT_FAIL_PENDING_OPERATION: + pr_debug("Set Key 
operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Set key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } +set_key_exit: + __qseecom_disable_clk(CLK_QSEE); + if (qseecom.qsee.instance != qseecom.ce_drv.instance) + __qseecom_disable_clk(CLK_CE_DRV); + return ret; +} + +static int __qseecom_update_current_key_user_info( + struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_userinfo_update_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_userinfo_update_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } else { + pr_err("scm call to update key userinfo failed: %d\n", + ret); + __qseecom_disable_clk(CLK_QSEE); + return -EFAULT; + } + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (resp.result == + QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } + if (ret) + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + break; + case QSEOS_RESULT_FAIL_PENDING_OPERATION: + pr_debug("Update Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Set key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } + + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + + +static int qseecom_enable_ice_setup(int usage) +{ + int ret = 0; + + if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("ufs", true); + else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("sdcc", true); + + return ret; +} + +static int qseecom_disable_ice_setup(int usage) +{ + int ret = 0; + + if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("ufs", false); + else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("sdcc", false); + + return ret; +} + +static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage) +{ + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + int i; + + switch (usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + break; + default: + pr_err("unsupported usage %d\n", usage); + return -EINVAL; + } + + pce_info_use = NULL; + + for (i = 0; i < total; i++) { + if (p->unit_num == unit) { + pce_info_use = p; + break; + } + p++; + } + if (!pce_info_use) { + pr_err("can not find %d\n", unit); + return -EINVAL; + } + return pce_info_use->num_ce_pipe_entries; +} + +static int qseecom_create_key(struct 
qseecom_dev_handle *data, + void __user *argp) +{ + int i; + uint32_t *ce_hw = NULL; + uint32_t pipe = 0; + int ret = 0; + uint32_t flags = 0; + struct qseecom_create_key_req create_key_req; + struct qseecom_key_generate_ireq generate_key_ireq; + struct qseecom_key_select_ireq set_key_ireq; + uint32_t entries = 0; + + ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + create_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("unsupported usage %d\n", create_key_req.usage); + ret = -EFAULT; + return ret; + } + entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT, + create_key_req.usage); + if (entries <= 0) { + pr_err("no ce instance for usage %d instance %d\n", + DEFAULT_CE_INFO_UNIT, create_key_req.usage); + ret = -EINVAL; + return ret; + } + + ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL); + if (!ce_hw) { + ret = -ENOMEM; + return ret; + } + ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw, + DEFAULT_CE_INFO_UNIT); + if (ret) { + pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret); + ret = -EINVAL; + goto free_buf; + } + + if (qseecom.fde_key_size) + flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE; + else + flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE; + + if (qseecom.enable_key_wrap_in_ks) + flags |= ENABLE_KEY_WRAP_IN_KS; + + generate_key_ireq.flags = flags; + generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY; + memset((void *)generate_key_ireq.key_id, + 0, QSEECOM_KEY_ID_SIZE); + memset((void *)generate_key_ireq.hash32, + 0, QSEECOM_HASH_SIZE); + memcpy((void *)generate_key_ireq.key_id, + (void *)key_id_array[create_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)generate_key_ireq.hash32, + (void *)create_key_req.hash32, + QSEECOM_HASH_SIZE); + + ret = __qseecom_generate_and_save_key(data, + create_key_req.usage, &generate_key_ireq); + if (ret) { + pr_err("Failed to generate key on storage: %d\n", ret); + goto free_buf; + } + + for (i = 0; i < entries; i++) { + set_key_ireq.qsee_command_id = QSEOS_SET_KEY; + if (create_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) { + set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM; + set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + + } else if (create_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) { + set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM; + set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + + } else { + set_key_ireq.ce = ce_hw[i]; + set_key_ireq.pipe = pipe; + } + set_key_ireq.flags = flags; + + /* set both PIPE_ENC and PIPE_ENC_XTS*/ + set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS; + memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + memcpy((void *)set_key_ireq.key_id, + (void *)key_id_array[create_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)set_key_ireq.hash32, + (void *)create_key_req.hash32, + QSEECOM_HASH_SIZE); + /* + * It will return false if it is GPCE based crypto instance or + * ICE is setup properly + */ + ret = qseecom_enable_ice_setup(create_key_req.usage); + if (ret) + goto free_buf; + + do { + ret = __qseecom_set_clear_ce_key(data, + create_key_req.usage, + &set_key_ireq); + /* + * wait a little before calling scm again to let other + * processes run + */ + if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION) + msleep(50); + + } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION); + + 
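+		/* Tear down ICE regardless of the key-set result; the result itself is checked below */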
qseecom_disable_ice_setup(create_key_req.usage); + + if (ret) { + pr_err("Failed to create key: pipe %d, ce %d: %d\n", + pipe, ce_hw[i], ret); + goto free_buf; + } else { + pr_err("Set the key successfully\n"); + if ((create_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) || + (create_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)) + goto free_buf; + } + } + +free_buf: + kfree_sensitive(ce_hw); + return ret; +} + +static int qseecom_wipe_key(struct qseecom_dev_handle *data, + void __user *argp) +{ + uint32_t *ce_hw = NULL; + uint32_t pipe = 0; + int ret = 0; + uint32_t flags = 0; + int i, j; + struct qseecom_wipe_key_req wipe_key_req; + struct qseecom_key_delete_ireq delete_key_ireq; + struct qseecom_key_select_ireq clear_key_ireq; + uint32_t entries = 0; + + ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("unsupported usage %d\n", wipe_key_req.usage); + ret = -EFAULT; + return ret; + } + + entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT, + wipe_key_req.usage); + if (entries <= 0) { + pr_err("no ce instance for usage %d instance %d\n", + DEFAULT_CE_INFO_UNIT, wipe_key_req.usage); + ret = -EINVAL; + return ret; + } + + ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL); + if (!ce_hw) { + ret = -ENOMEM; + return ret; + } + + ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw, + DEFAULT_CE_INFO_UNIT); + if (ret) { + pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret); + ret = -EINVAL; + goto free_buf; + } + + if (wipe_key_req.wipe_key_flag) { + delete_key_ireq.flags = flags; + delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY; + memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memcpy((void *)delete_key_ireq.key_id, + (void *)key_id_array[wipe_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + + ret = __qseecom_delete_saved_key(data, wipe_key_req.usage, + &delete_key_ireq); + if (ret) { + pr_err("Failed to delete key from ssd storage: %d\n", + ret); + ret = -EFAULT; + goto free_buf; + } + } + + for (j = 0; j < entries; j++) { + clear_key_ireq.qsee_command_id = QSEOS_SET_KEY; + if (wipe_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) { + clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM; + clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + } else if (wipe_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) { + clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM; + clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + } else { + clear_key_ireq.ce = ce_hw[j]; + clear_key_ireq.pipe = pipe; + } + clear_key_ireq.flags = flags; + clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS; + for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++) + clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID; + memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + + /* + * It will return false if it is GPCE based crypto instance or + * ICE is setup properly + */ + ret = qseecom_enable_ice_setup(wipe_key_req.usage); + if (ret) + goto free_buf; + + ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage, + &clear_key_ireq); + + qseecom_disable_ice_setup(wipe_key_req.usage); + + if (ret) { + pr_err("Failed to wipe key: pipe %d, ce %d: %d\n", + pipe, ce_hw[j], ret); + ret = -EFAULT; + goto free_buf; + } + } + +free_buf: + kfree_sensitive(ce_hw); + return ret; +} + +static int 
qseecom_update_key_user_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + uint32_t flags = 0; + struct qseecom_update_key_userinfo_req update_key_req; + struct qseecom_key_userinfo_update_ireq ireq; + + ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + update_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", update_key_req.usage); + return -EFAULT; + } + + ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO; + + if (qseecom.fde_key_size) + flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE; + else + flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE; + + ireq.flags = flags; + memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE); + memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE); + memcpy((void *)ireq.key_id, + (void *)key_id_array[update_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)ireq.current_hash32, + (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE); + memcpy((void *)ireq.new_hash32, + (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE); + + do { + ret = __qseecom_update_current_key_user_info(data, + update_key_req.usage, + &ireq); + /* + * wait a little before calling scm again to let other + * processes run + */ + if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION) + msleep(50); + + } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION); + if (ret) { + pr_err("Failed to update key info: %d\n", ret); + return ret; + } + return ret; + +} +static int qseecom_is_es_activated(void __user *argp) +{ + struct qseecom_is_es_activated_req req = {0}; + struct qseecom_command_scm_resp resp; + int ret; + + if (qseecom.qsee_version < QSEE_VERSION_04) { + pr_err("invalid qsee version\n"); + return -ENODEV; + } + + if (argp == NULL) { + pr_err("arg is null\n"); + return -EINVAL; + } + + ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call failed\n"); + return ret; + } + + req.is_activated = resp.result; + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("copy_to_user failed\n"); + return ret; + } + + return 0; +} + +static int qseecom_save_partition_hash(void __user *argp) +{ + struct qseecom_save_partition_hash_req req; + struct qseecom_command_scm_resp resp; + int ret; + + memset(&resp, 0x00, sizeof(resp)); + + if (qseecom.qsee_version < QSEE_VERSION_04) { + pr_err("invalid qsee version\n"); + return -ENODEV; + } + + if (argp == NULL) { + pr_err("arg is null\n"); + return -EINVAL; + } + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID, + (void *)&req, sizeof(req), (void *)&resp, sizeof(resp)); + if (ret) { + pr_err("qseecom_scm_call failed\n"); + return ret; + } + + return 0; +} + +static int qseecom_mdtp_cipher_dip(void __user *argp) +{ + struct qseecom_mdtp_cipher_dip_req req; + u32 tzbuflenin, tzbuflenout; + char *tzbufin = NULL, *tzbufout = NULL; + struct qseecom_scm_desc desc = {0}; + int ret; + phys_addr_t pain, paout; + struct qtee_shm shmin = {0}, shmout = {0}; + + do { + /* Copy the parameters from userspace */ + if (argp == NULL) { + pr_err("arg is null\n"); + ret = -EINVAL; + break; + } + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed, ret= %d\n", 
ret); + break; + } + + if (req.in_buf == NULL || req.out_buf == NULL || + req.in_buf_size == 0 || req.in_buf_size > MAX_DIP || + req.out_buf_size == 0 || req.out_buf_size > MAX_DIP || + req.direction > 1) { + pr_err("invalid parameters\n"); + ret = -EINVAL; + break; + } + + /* Copy the input buffer from userspace to kernel space */ + tzbuflenin = PAGE_ALIGN(req.in_buf_size); + tzbufin = __qseecom_alloc_tzbuf(tzbuflenin, &pain, &shmin); + if (!tzbufin) { + pr_err("error allocating in buffer\n"); + ret = -ENOMEM; + break; + } + + ret = copy_from_user(tzbufin, (void __user *)req.in_buf, + req.in_buf_size); + if (ret) { + pr_err("copy_from_user failed, ret=%d\n", ret); + break; + } + + qtee_shmbridge_flush_shm_buf(&shmin); + + /* Prepare the output buffer in kernel space */ + tzbuflenout = PAGE_ALIGN(req.out_buf_size); + tzbufout = __qseecom_alloc_tzbuf(tzbuflenout, &paout, &shmout); + if (!tzbufout) { + pr_err("error allocating out buffer\n"); + ret = -ENOMEM; + break; + } + + qtee_shmbridge_flush_shm_buf(&shmout); + + /* Send the command to TZ */ + desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID; + desc.args[0] = pain; + desc.args[1] = req.in_buf_size; + desc.args[2] = paout; + desc.args[3] = req.out_buf_size; + desc.args[4] = req.direction; + + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + break; + + ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc); + + __qseecom_disable_clk(CLK_QSEE); + + if (ret) { + pr_err("failed for SCM_SVC_MDTP, ret=%d\n", + ret); + break; + } + + /* Copy the output buffer from kernel space to userspace */ + qtee_shmbridge_flush_shm_buf(&shmout); + ret = copy_to_user((void __user *)req.out_buf, + tzbufout, req.out_buf_size); + if (ret) { + pr_err("copy_to_user failed, ret=%d\n", ret); + break; + } + } while (0); + + __qseecom_free_tzbuf(&shmin); + __qseecom_free_tzbuf(&shmout); + + return ret; +} + +static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data, + struct qseecom_qteec_req *req) +{ + if (!data || !data->client.sb_virt) { + pr_err("Client or client buf is not initialized\n"); + return -EINVAL; + } + + if (data->type != QSEECOM_CLIENT_APP) + return -EFAULT; + + if (req->req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + return -EINVAL; + } + + if (req->req_len + req->resp_len > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. 
Required: %u, Available: %zu\n", + (req->req_len + req->resp_len), data->client.sb_length); + return -ENOMEM; + } + + if (req->req_ptr == NULL || req->resp_ptr == NULL) { + pr_err("cmd buffer or response buffer is null\n"); + return -EINVAL; + } + if (((uintptr_t)req->req_ptr < + data->client.user_virt_sb_base) || + ((uintptr_t)req->req_ptr >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + + if (((uintptr_t)req->resp_ptr < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_ptr >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + + if ((req->req_len == 0) || (req->resp_len == 0)) { + pr_err("cmd buf lengtgh/response buf length not valid\n"); + return -EINVAL; + } + + if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) { + pr_err("Integer overflow in req_len & req_ptr\n"); + return -EINVAL; + } + + if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_ptr\n"); + return -EINVAL; + } + + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->req_ptr + req->req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_ptr + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data, + uint32_t fd_idx, struct sg_table *sg_ptr) +{ + struct scatterlist *sg = sg_ptr->sgl; + struct qseecom_sg_entry *sg_entry; + void *buf; + uint i; + size_t size; + dma_addr_t coh_pmem; + + if (fd_idx >= MAX_ION_FD) { + pr_err("fd_idx [%d] is invalid\n", fd_idx); + return -ENOMEM; + } + /* + * Allocate a buffer, populate it with number of entry plus + * each sg entry's phy addr and length; then return the + * phy_addr of the buffer. 
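+	 * The buffer is allocated with dma_alloc_coherent(); its virtual address,
+	 * physical address and size are cached in data->client.sec_buf_fd[fd_idx]
+	 * so it can be freed later during cleanup.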
+ */ + size = sizeof(uint32_t) + + sizeof(struct qseecom_sg_entry) * sg_ptr->nents; + size = (size + PAGE_SIZE) & PAGE_MASK; + buf = dma_alloc_coherent(qseecom.dev, + size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + *(uint32_t *)buf = sg_ptr->nents; + sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t)); + for (i = 0; i < sg_ptr->nents; i++) { + sg_entry->phys_addr = (uint32_t)sg_dma_address(sg); + sg_entry->len = sg->length; + sg_entry++; + sg = sg_next(sg); + } + data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true; + data->client.sec_buf_fd[fd_idx].vbase = buf; + data->client.sec_buf_fd[fd_idx].pbase = coh_pmem; + data->client.sec_buf_fd[fd_idx].size = size; + return 0; +} + +static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req, + struct qseecom_dev_handle *data, bool cleanup) +{ + int ret = 0; + int i = 0; + uint32_t *update; + struct sg_table *sg_ptr = NULL; + struct scatterlist *sg; + struct qseecom_param_memref *memref; + int ion_fd = -1; + struct dma_buf *dmabuf = NULL; + struct dma_buf_attachment *attach = NULL; + + if (req == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + for (i = 0; i < MAX_ION_FD; i++) { + if (req->ifd_data[i].fd > 0) { + ion_fd = req->ifd_data[i].fd; + if ((req->req_len < + sizeof(struct qseecom_param_memref)) || + (req->ifd_data[i].cmd_buf_offset > + req->req_len - + sizeof(struct qseecom_param_memref))) { + pr_err("Invalid offset/req len 0x%x/0x%x\n", + req->req_len, + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + update = (uint32_t *)((char *) req->req_ptr + + req->ifd_data[i].cmd_buf_offset); + if (!update) { + pr_err("update pointer is NULL\n"); + return -EINVAL; + } + } else { + continue; + } + /* Populate the cmd data structure with the phys_addr */ + ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf); + if (ret) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + sg = sg_ptr->sgl; + if (sg == NULL) { + pr_err("sg is NULL\n"); + goto err; + } + if ((sg_ptr->nents == 0) || (sg->length == 0)) { + pr_err("Num of scat entr (%d)or length(%d) invalid\n", + sg_ptr->nents, sg->length); + goto err; + } + /* clean up buf for pre-allocated fd */ + if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd && + (*update)) { + if (data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.dev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + memset((void *)update, 0, + sizeof(struct qseecom_param_memref)); + memset(&(data->client.sec_buf_fd[i]), 0, + sizeof(struct qseecom_sec_buf_fd_info)); + goto clean; + } + + if (*update == 0) { + /* update buf for pre-allocated fd from secure heap*/ + ret = __qseecom_qteec_handle_pre_alc_fd(data, i, + sg_ptr); + if (ret) { + pr_err("Failed to handle buf for fd[%d]\n", i); + goto err; + } + memref = (struct qseecom_param_memref *)update; + memref->buffer = + (uint32_t)(data->client.sec_buf_fd[i].pbase); + memref->size = + (uint32_t)(data->client.sec_buf_fd[i].size); + } else { + /* update buf for fd from non-secure qseecom heap */ + if (sg_ptr->nents != 1) { + pr_err("Num of scat entr (%d) invalid\n", + sg_ptr->nents); + goto err; + } + if (cleanup) + *update = 0; + else + *update = (uint32_t)sg_dma_address(sg_ptr->sgl); + } +clean: + if (cleanup) { + ret = qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = 
qseecom_dmabuf_cache_operations(dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, + req->ifd_data[i].cmd_buf_offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } + /* unmap the dmabuf */ + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + sg_ptr = NULL; + dmabuf = NULL; + attach = NULL; + } + return ret; +err: + if (!IS_ERR_OR_NULL(sg_ptr)) { + qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf); + MAKE_NULL(sg_ptr, attach, dmabuf); + } + return -ENOMEM; +} + +static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, + struct qseecom_qteec_req *req, uint32_t cmd_id) +{ + struct qseecom_command_scm_resp resp; + struct qseecom_qteec_ireq ireq; + struct qseecom_qteec_64bit_ireq ireq_64bit; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + unsigned long flags; + int ret = 0; + int ret2 = 0; + uint32_t reqd_len_sb_in = 0; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = data->sglistinfo_ptr; + void *req_ptr = NULL; + void *resp_ptr = NULL; + + ret = __qseecom_qteec_validate_msg(data, req); + if (ret) + return ret; + + req_ptr = req->req_ptr; + resp_ptr = req->resp_ptr; + + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + if (__qseecom_find_pending_unload_app(data->client.app_id, + data->client.app_name)) { + pr_err("app %d (%s) unload is pending\n", + data->client.app_id, data->client.app_name); + return -ENOENT; + } + + req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->req_ptr); + req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->resp_ptr); + + if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || + (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { + ret = __qseecom_update_qteec_req_buf( + (struct qseecom_qteec_modfd_req *)req, data, false); + if (ret) + return ret; + } + + if (qseecom.qsee_version < QSEE_VERSION_40) { + ireq.app_id = data->client.app_id; + ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq.req_len = req->req_len; + ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq.resp_len = req->resp_len; + ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table); + ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + cmd_buf = (void *)&ireq; + cmd_len = sizeof(struct qseecom_qteec_ireq); + } else { + ireq_64bit.app_id = data->client.app_id; + ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq_64bit.req_len = req->req_len; + ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq_64bit.resp_len = req->resp_len; + if ((data->client.app_arch == ELFCLASS32) && + ((ireq_64bit.req_ptr >= + PHY_ADDR_4G - ireq_64bit.req_len) || + (ireq_64bit.resp_ptr >= + PHY_ADDR_4G - ireq_64bit.resp_len))){ + pr_err("32bit app %s (id: 
%d): phy_addr exceeds 4G\n", + data->client.app_name, data->client.app_id); + pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n", + ireq_64bit.req_ptr, ireq_64bit.req_len, + ireq_64bit.resp_ptr, ireq_64bit.resp_len); + return -EFAULT; + } + ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table); + ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + cmd_buf = (void *)&ireq_64bit; + cmd_len = sizeof(struct qseecom_qteec_64bit_ireq); + } + if (qseecom.whitelist_support + && cmd_id == QSEOS_TEE_OPEN_SESSION) + *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST; + else + *(uint32_t *)cmd_buf = cmd_id; + + reqd_len_sb_in = req->req_len + req->resp_len; + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + goto exit; + } + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + goto exit; + } + } + } +exit: + if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || + (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { + ret2 = __qseecom_update_qteec_req_buf( + (struct qseecom_qteec_modfd_req *)req, data, true); + if (ret2) + return ret2; + } + return ret; +} + +static int qseecom_qteec_open_session(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req, + QSEOS_TEE_OPEN_SESSION); + + return ret; +} + +static int qseecom_qteec_close_session(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION); + return ret; +} + +static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + struct qseecom_command_scm_resp resp; + struct qseecom_qteec_ireq ireq; + struct qseecom_qteec_64bit_ireq ireq_64bit; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + unsigned long flags; + int ret = 0; + int i = 0; + uint32_t reqd_len_sb_in = 0; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = data->sglistinfo_ptr; + void *req_ptr = NULL; + void *resp_ptr = NULL; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return 
ret; + } + ret = __qseecom_qteec_validate_msg(data, + (struct qseecom_qteec_req *)(&req)); + if (ret) + return ret; + req_ptr = req.req_ptr; + resp_ptr = req.resp_ptr; + + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + if (__qseecom_find_pending_unload_app(data->client.app_id, + data->client.app_name)) { + pr_err("app %d (%s) unload is pending\n", + data->client.app_id, data->client.app_name); + return -ENOENT; + } + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (req.ifd_data[i].fd) { + if (req.ifd_data[i].cmd_buf_offset >= req.req_len) + return -EINVAL; + } + } + req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.req_ptr); + req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.resp_ptr); + ret = __qseecom_update_qteec_req_buf(&req, data, false); + if (ret) + return ret; + + if (qseecom.qsee_version < QSEE_VERSION_40) { + ireq.app_id = data->client.app_id; + ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq.req_len = req.req_len; + ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq.resp_len = req.resp_len; + cmd_buf = (void *)&ireq; + cmd_len = sizeof(struct qseecom_qteec_ireq); + ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table); + ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + } else { + ireq_64bit.app_id = data->client.app_id; + ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq_64bit.req_len = req.req_len; + ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq_64bit.resp_len = req.resp_len; + cmd_buf = (void *)&ireq_64bit; + cmd_len = sizeof(struct qseecom_qteec_64bit_ireq); + ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table); + ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm); + } + reqd_len_sb_in = req.req_len + req.resp_len; + if (qseecom.whitelist_support) + *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST; + else + *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND; + + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_CLEAN); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + return ret; + } + ret = qseecom_dmabuf_cache_operations(data->client.dmabuf, + QSEECOM_CACHE_INVALIDATE); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + return ret; + } + } else { 
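+			/* No reentrancy support and not an incomplete command: only SUCCESS is accepted */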
+ if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + } + } + } + ret = __qseecom_update_qteec_req_buf(&req, data, true); + if (ret) + return ret; + + return 0; +} + +static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req, + QSEOS_TEE_REQUEST_CANCELLATION); + + return ret; +} + +static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data) +{ + if (data->sglist_cnt) { + memset(data->sglistinfo_ptr, 0, + SGLISTINFO_TABLE_SIZE); + data->sglist_cnt = 0; + } +} + +long qseecom_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + struct qseecom_dev_handle *data = file->private_data; + void __user *argp = (void __user *) arg; + bool perf_enabled = false; + if (!data) { + pr_err("Invalid/uninitialized device handle\n"); + return -EINVAL; + } + + if (data->abort) { + pr_err("Aborting qseecom driver\n"); + return -ENODEV; + } + if (cmd != QSEECOM_IOCTL_RECEIVE_REQ && + cmd != QSEECOM_IOCTL_SEND_RESP_REQ && + cmd != QSEECOM_IOCTL_SEND_MODFD_RESP && + cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64) + __wakeup_unregister_listener_kthread(); + __wakeup_unload_app_kthread(); + + switch (cmd) { + case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("reg lstnr req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + pr_debug("ioctl register_listener_req()\n"); + mutex_lock(&listener_access_lock); + atomic_inc(&data->ioctl_count); + data->type = QSEECOM_LISTENER_SERVICE; + ret = qseecom_register_listener(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&listener_access_lock); + if (ret) + pr_err("failed qseecom_register_listener: %d\n", ret); + break; + } + case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + pr_debug("ioctl unregister_listener_req()\n"); + mutex_lock(&listener_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_unregister_listener(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&listener_access_lock); + if (ret) + pr_err("failed qseecom_unregister_listener: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_CMD_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("send cmd req: invalid handle (%d) app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + if (qseecom.support_bus_scaling) { + /* register bus bw in case the client doesn't do it */ + if (!data->mode) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs( + data, HIGH); + mutex_unlock(&qsee_bw_mutex); + } + ret = qseecom_scale_bus_bandwidth_timer(INACTIVE); + if (ret) { + pr_err("Failed to set bw.\n"); + ret = -EINVAL; + mutex_unlock(&app_access_lock); + break; + } + } + /* + * On targets where crypto clock is handled by HLOS, + * if clk_access_cnt is zero and perf_enabled is false, + * then the 
crypto clock was not enabled before sending cmd to + * tz, qseecom will enable the clock to avoid service failure. + */ + if (!qseecom.no_clock_support && + !qseecom.qsee.clk_access_cnt && !data->perf_enabled) { + pr_debug("ce clock is not enabled!\n"); + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clock with err %d\n", + ret); + mutex_unlock(&app_access_lock); + ret = -EINVAL; + break; + } + perf_enabled = true; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_send_cmd(data, argp); + if (qseecom.support_bus_scaling) + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + if (perf_enabled) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_send_cmd: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: + case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + if (qseecom.support_bus_scaling) { + if (!data->mode) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs( + data, HIGH); + mutex_unlock(&qsee_bw_mutex); + } + ret = qseecom_scale_bus_bandwidth_timer(INACTIVE); + if (ret) { + pr_err("Failed to set bw.\n"); + mutex_unlock(&app_access_lock); + ret = -EINVAL; + break; + } + } + /* + * On targets where crypto clock is handled by HLOS, + * if clk_access_cnt is zero and perf_enabled is false, + * then the crypto clock was not enabled before sending cmd to + * tz, qseecom will enable the clock to avoid service failure. 
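+		 * The vote taken here is dropped again after the command returns
+		 * (see the perf_enabled handling below).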
+ */ + if (!qseecom.no_clock_support && + !qseecom.qsee.clk_access_cnt && !data->perf_enabled) { + pr_debug("ce clock is not enabled!\n"); + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clock with err %d\n", + ret); + mutex_unlock(&app_access_lock); + ret = -EINVAL; + break; + } + perf_enabled = true; + } + atomic_inc(&data->ioctl_count); + if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ) + ret = qseecom_send_modfd_cmd(data, argp); + else + ret = qseecom_send_modfd_cmd_64(data, argp); + if (qseecom.support_bus_scaling) + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + if (perf_enabled) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_send_cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_IOCTL_RECEIVE_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("receive req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_receive_req(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + if (ret && (ret != -ERESTARTSYS)) + pr_err("failed qseecom_receive_req: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_RESP_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("send resp req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + mutex_lock(&listener_access_lock); + atomic_inc(&data->ioctl_count); + if (!qseecom.qsee_reentrancy_support) + ret = qseecom_send_resp(); + else + ret = qseecom_reentrancy_send_resp(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&listener_access_lock); + if (ret) + pr_err("failed qseecom_send_resp: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: { + if ((data->type != QSEECOM_CLIENT_APP) && + (data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_SECURE_SERVICE)) { + pr_err("set mem param req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_set_client_mem_param(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed Qqseecom_set_mem_param request: %d\n", + ret); + break; + } + case QSEECOM_IOCTL_LOAD_APP_REQ: { + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("load app req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_CLIENT_APP; + pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_load_app(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed load_app request: %d\n", ret); + __wakeup_unload_app_kthread(); + break; + } + case QSEECOM_IOCTL_UNLOAD_APP_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("unload app req:invalid handle(%d) app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + 
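+		/* Hold app_access_lock across the unload so it cannot race with load/send-cmd paths */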
atomic_inc(&data->ioctl_count); + ret = qseecom_unload_app(data, false); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed unload_app request: %d\n", ret); + __wakeup_unload_app_kthread(); + break; + } + case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: { + atomic_inc(&data->ioctl_count); + ret = qseecom_get_qseos_version(data, argp); + if (ret) + pr_err("qseecom_get_qseos_version: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_PERF_ENABLE_REQ:{ + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("perf enable req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if ((data->type == QSEECOM_CLIENT_APP) && + (data->client.app_id == 0)) { + pr_err("perf enable req:invalid handle(%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs(data, HIGH); + mutex_unlock(&qsee_bw_mutex); + } else { + ret = qseecom_perf_enable(data); + if (ret) + pr_err("Fail to vote for clocks %d\n", ret); + } + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_PERF_DISABLE_REQ:{ + if ((data->type != QSEECOM_SECURE_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("perf disable req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if ((data->type == QSEECOM_CLIENT_APP) && + (data->client.app_id == 0)) { + pr_err("perf disable: invalid handle (%d)app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } else { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + atomic_dec(&data->ioctl_count); + break; + } + + case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: { + /* If crypto clock is not handled by HLOS, return directly. 
*/ + if (qseecom.no_clock_support) { + pr_debug("crypto clock is not handled by HLOS\n"); + break; + } + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("set bus scale: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_scale_bus_bandwidth(data, argp); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("load ext elf req: invalid client handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_load_external_elf(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed load_external_elf request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: { + if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) { + pr_err("unload ext elf req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_unload_external_elf(data); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed unload_app request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("app loaded query req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_CLIENT_APP; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data); + ret = qseecom_query_app_loaded(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("send cmd svc req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_SECURE_SERVICE; + if (qseecom.qsee_version < QSEE_VERSION_03) { + pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_send_service_cmd(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_CREATE_KEY_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("create key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) { + pr_err("Create Key feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_create_key(data, argp); + if (ret) + pr_err("failed to create encryption key: %d\n", ret); + + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_WIPE_KEY_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("wipe key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) 
{ + pr_err("Wipe Key feature unsupported in qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_wipe_key(data, argp); + if (ret) + pr_err("failed to wipe encryption key: %d\n", ret); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("update key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) { + pr_err("Update Key feature unsupported in qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_update_key_user_info(data, argp); + if (ret) + pr_err("failed to update key user info: %d\n", ret); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("save part hash req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_save_partition_hash(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("ES activated req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_is_es_activated(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("MDTP cipher DIP req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_mdtp_cipher_dip(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SEND_MODFD_RESP: + case QSEECOM_IOCTL_SEND_MODFD_RESP_64: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("receive req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + mutex_lock(&listener_access_lock); + atomic_inc(&data->ioctl_count); + if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP) + ret = qseecom_send_modfd_resp(data, argp); + else + ret = qseecom_send_modfd_resp_64(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&listener_access_lock); + if (ret) + pr_err("failed qseecom_send_mod_resp: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Open session: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = 
qseecom_qteec_open_session(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed open_session_cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Close session: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_close_session(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed close_session_cmd: %d\n", ret); + break; + } + case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_invoke_modfd_cmd(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed Invoke cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Cancel req: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_request_cancellation(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed request_cancellation: %d\n", ret); + break; + } + case QSEECOM_IOCTL_GET_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_get_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_free_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_query_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_SET_ICE_INFO: { + struct qseecom_ice_data_t ice_data; + + ret = copy_from_user(&ice_data, argp, sizeof(ice_data)); + if (ret) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + qcom_ice_set_fde_flag(ice_data.flag); + break; + } + case QSEECOM_IOCTL_FBE_CLEAR_KEY: { + pr_err("QSEECOM_IOCTL_FBE_CLEAR_KEY IOCTL is deprecated\n"); + return -EINVAL; 
+ } + default: + pr_err("Invalid IOCTL: 0x%x\n", cmd); + return -ENOIOCTLCMD; + } + return ret; +} + +static int qseecom_open(struct inode *inode, struct file *file) +{ + int ret = 0; + struct qseecom_dev_handle *data; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + { + return -ENOMEM; + } + file->private_data = data; + data->abort = 0; + data->type = QSEECOM_GENERIC; + data->released = false; + memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE); + data->mode = INACTIVE; + init_waitqueue_head(&data->abort_wq); + atomic_set(&data->ioctl_count, 0); + data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf( + sizeof(struct sglist_info) * MAX_ION_FD, + &data->sglistinfo_shm.paddr, + &data->sglistinfo_shm); + if (!data->sglistinfo_ptr) + { + return -ENOMEM; + } + return ret; +} + +static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data) +{ + if (qseecom.no_clock_support) + return; + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + if (data->mode != INACTIVE) { + qseecom_unregister_bus_bandwidth_needs(data); + if (qseecom.cumulative_mode == INACTIVE) + __qseecom_set_msm_bus_request(INACTIVE); + } + mutex_unlock(&qsee_bw_mutex); + } else { + if (data->fast_load_enabled) + qsee_disable_clock_vote(data, CLK_SFPB); + if (data->perf_enabled) + qsee_disable_clock_vote(data, CLK_DFAB); + } +} + +static int qseecom_release(struct inode *inode, struct file *file) +{ + struct qseecom_dev_handle *data = file->private_data; + int ret = 0; + bool free_private_data = true; + + __qseecom_release_disable_clk(data); + if (!data->released) { + pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n", + data->type, data->mode, data); + switch (data->type) { + case QSEECOM_LISTENER_SERVICE: + pr_debug("release lsnr svc %d\n", data->listener.id); + mutex_lock(&listener_access_lock); + ret = qseecom_unregister_listener(data); + if (!ret) + free_private_data = false; + data->listener.release_called = true; + mutex_unlock(&listener_access_lock); + __wakeup_unregister_listener_kthread(); + break; + case QSEECOM_CLIENT_APP: + pr_debug("release app %d (%s)\n", + data->client.app_id, data->client.app_name); + if (data->client.app_id) { + free_private_data = false; + mutex_lock(&unload_app_pending_list_lock); + ret = qseecom_prepare_unload_app(data); + mutex_unlock(&unload_app_pending_list_lock); + __wakeup_unload_app_kthread(); + } + break; + case QSEECOM_SECURE_SERVICE: + case QSEECOM_GENERIC: + if (data->client.dmabuf) { + qseecom_vaddr_unmap(data->client.sb_virt, + data->client.sgt, data->client.attach, + data->client.dmabuf); + MAKE_NULL(data->client.sgt, data->client.attach, + data->client.dmabuf); + } + break; + case QSEECOM_UNAVAILABLE_CLIENT_APP: + break; + default: + pr_err("Unsupported clnt_handle_type %d\n", + data->type); + break; + } + } + + if (free_private_data) { + __qseecom_free_tzbuf(&data->sglistinfo_shm); + kfree(data); + } + return ret; +} + +static const struct file_operations qseecom_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qseecom_ioctl, + .open = qseecom_open, + .release = qseecom_release +}; + +static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce) +{ + int rc = 0; + struct device *pdev; + struct qseecom_clk *qclk; + char *core_clk_src = NULL; + char *core_clk = NULL; + char *iface_clk = NULL; + char *bus_clk = NULL; + + switch (ce) { + case CLK_QSEE: { + core_clk_src = "core_clk_src"; + core_clk = "core_clk"; + iface_clk = "iface_clk"; + bus_clk = "bus_clk"; + qclk = &qseecom.qsee; + qclk->instance = 
CLK_QSEE; + break; + }; + case CLK_CE_DRV: { + core_clk_src = "ce_drv_core_clk_src"; + core_clk = "ce_drv_core_clk"; + iface_clk = "ce_drv_iface_clk"; + bus_clk = "ce_drv_bus_clk"; + qclk = &qseecom.ce_drv; + qclk->instance = CLK_CE_DRV; + break; + }; + default: + pr_err("Invalid ce hw instance: %d!\n", ce); + return -EIO; + } + + if (qseecom.no_clock_support) { + qclk->ce_core_clk = NULL; + qclk->ce_clk = NULL; + qclk->ce_bus_clk = NULL; + qclk->ce_core_src_clk = NULL; + return 0; + } + + pdev = qseecom.pdev; + + /* Get CE3 src core clk. */ + qclk->ce_core_src_clk = clk_get(pdev, core_clk_src); + if (!IS_ERR(qclk->ce_core_src_clk)) { + rc = clk_set_rate(qclk->ce_core_src_clk, + qseecom.ce_opp_freq_hz); + if (rc) { + clk_put(qclk->ce_core_src_clk); + qclk->ce_core_src_clk = NULL; + pr_err("Unable to set the core src clk @%uMhz.\n", + qseecom.ce_opp_freq_hz/CE_CLK_DIV); + return -EIO; + } + } else { + pr_warn("Unable to get CE core src clk, set to NULL\n"); + qclk->ce_core_src_clk = NULL; + } + + /* Get CE core clk */ + qclk->ce_core_clk = clk_get(pdev, core_clk); + if (IS_ERR(qclk->ce_core_clk)) { + rc = PTR_ERR(qclk->ce_core_clk); + pr_err("Unable to get CE core clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + return -EIO; + } + + /* Get CE Interface clk */ + qclk->ce_clk = clk_get(pdev, iface_clk); + if (IS_ERR(qclk->ce_clk)) { + rc = PTR_ERR(qclk->ce_clk); + pr_err("Unable to get CE interface clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + clk_put(qclk->ce_core_clk); + return -EIO; + } + + /* Get CE AXI clk */ + qclk->ce_bus_clk = clk_get(pdev, bus_clk); + if (IS_ERR(qclk->ce_bus_clk)) { + rc = PTR_ERR(qclk->ce_bus_clk); + pr_err("Unable to get CE BUS interface clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + clk_put(qclk->ce_core_clk); + clk_put(qclk->ce_clk); + return -EIO; + } + + return rc; +} + +static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + if (qclk->ce_clk != NULL) { + clk_put(qclk->ce_clk); + qclk->ce_clk = NULL; + } + if (qclk->ce_core_clk != NULL) { + clk_put(qclk->ce_core_clk); + qclk->ce_core_clk = NULL; + } + if (qclk->ce_bus_clk != NULL) { + clk_put(qclk->ce_bus_clk); + qclk->ce_bus_clk = NULL; + } + if (qclk->ce_core_src_clk != NULL) { + clk_put(qclk->ce_core_src_clk); + qclk->ce_core_src_clk = NULL; + } + qclk->instance = CLK_INVALID; +} + +static int qseecom_retrieve_ce_data(struct platform_device *pdev) +{ + int rc = 0; + uint32_t hlos_num_ce_hw_instances; + uint32_t disk_encrypt_pipe; + uint32_t file_encrypt_pipe; + uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0}; + int i; + const int *tbl; + int size; + int entry; + struct qseecom_crypto_info *pfde_tbl = NULL; + struct qseecom_crypto_info *p; + int tbl_size; + int j; + bool old_db = true; + struct qseecom_ce_info_use *pce_info_use; + uint32_t *unit_tbl = NULL; + int total_units = 0; + struct qseecom_ce_pipe_entry *pce_entry; + + qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL; + qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0; + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,qsee-ce-hw-instance", + &qseecom.ce_info.qsee_ce_hw_instance)) { + pr_err("Fail to get qsee ce hw instance information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("qsee-ce-hw-instance=0x%x\n", + qseecom.ce_info.qsee_ce_hw_instance); + } + + qseecom.support_fde = 
of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-fde"); + qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-pfe"); + + if (!qseecom.support_pfe && !qseecom.support_fde) { + pr_warn("Device does not support PFE/FDE\n"); + goto out; + } + + if (qseecom.support_fde) + tbl = of_get_property((&pdev->dev)->of_node, + "qcom,full-disk-encrypt-info", &size); + else + tbl = NULL; + if (tbl) { + old_db = false; + if (size % sizeof(struct qseecom_crypto_info)) { + pr_err("full-disk-encrypt-info tbl size(%d)\n", + size); + rc = -EINVAL; + goto out; + } + tbl_size = size / sizeof + (struct qseecom_crypto_info); + + pfde_tbl = kzalloc(size, GFP_KERNEL); + unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL); + total_units = 0; + + if (!pfde_tbl || !unit_tbl) { + rc = -ENOMEM; + goto out; + } + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,full-disk-encrypt-info", + (u32 *)pfde_tbl, size/sizeof(u32))) { + pr_err("failed to read full-disk-encrypt-info tbl\n"); + rc = -EINVAL; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + for (j = 0; j < total_units; j++) { + if (p->unit_num == *(unit_tbl + j)) + break; + } + if (j == total_units) { + *(unit_tbl + total_units) = p->unit_num; + total_units++; + } + } + + qseecom.ce_info.num_fde = total_units; + pce_info_use = qseecom.ce_info.fde = kcalloc( + total_units, sizeof(struct qseecom_ce_info_use), + GFP_KERNEL); + if (!pce_info_use) { + rc = -ENOMEM; + goto out; + } + + for (j = 0; j < total_units; j++, pce_info_use++) { + pce_info_use->unit_num = *(unit_tbl + j); + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE; + pce_info_use->num_ce_pipe_entries = 0; + pce_info_use->ce_pipe_entry = NULL; + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) + pce_info_use->num_ce_pipe_entries++; + } + + entry = pce_info_use->num_ce_pipe_entries; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + rc = -ENOMEM; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) { + pce_entry->ce_num = p->ce; + pce_entry->ce_pipe_pair = + p->pipe_pair; + pce_entry->valid = true; + pce_entry++; + } + } + } + kfree(unit_tbl); + unit_tbl = NULL; + kfree(pfde_tbl); + pfde_tbl = NULL; + } + + if (qseecom.support_pfe) + tbl = of_get_property((&pdev->dev)->of_node, + "qcom,per-file-encrypt-info", &size); + else + tbl = NULL; + if (tbl) { + old_db = false; + if (size % sizeof(struct qseecom_crypto_info)) { + pr_err("per-file-encrypt-info tbl size(%d)\n", + size); + rc = -EINVAL; + goto out; + } + tbl_size = size / sizeof + (struct qseecom_crypto_info); + + pfde_tbl = kzalloc(size, GFP_KERNEL); + unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL); + total_units = 0; + if (!pfde_tbl || !unit_tbl) { + rc = -ENOMEM; + goto out; + } + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,per-file-encrypt-info", + (u32 *)pfde_tbl, size/sizeof(u32))) { + pr_err("failed to read per-file-encrypt-info tbl\n"); + rc = -EINVAL; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + for (j = 0; j < total_units; j++) { + if (p->unit_num == *(unit_tbl + j)) + break; + } + if (j == total_units) { + *(unit_tbl + total_units) = p->unit_num; + total_units++; + } + } + + qseecom.ce_info.num_pfe = total_units; + pce_info_use = qseecom.ce_info.pfe = kcalloc( + total_units, 
sizeof(struct qseecom_ce_info_use), + GFP_KERNEL); + if (!pce_info_use) { + rc = -ENOMEM; + goto out; + } + + for (j = 0; j < total_units; j++, pce_info_use++) { + pce_info_use->unit_num = *(unit_tbl + j); + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE; + pce_info_use->num_ce_pipe_entries = 0; + pce_info_use->ce_pipe_entry = NULL; + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) + pce_info_use->num_ce_pipe_entries++; + } + + entry = pce_info_use->num_ce_pipe_entries; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + rc = -ENOMEM; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) { + pce_entry->ce_num = p->ce; + pce_entry->ce_pipe_pair = + p->pipe_pair; + pce_entry->valid = true; + pce_entry++; + } + } + } + kfree(unit_tbl); + unit_tbl = NULL; + kfree(pfde_tbl); + pfde_tbl = NULL; + } + + if (!old_db) + goto out1; + + if (of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-multiple-ce-hw-instance")) { + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,hlos-num-ce-hw-instances", + &hlos_num_ce_hw_instances)) { + pr_err("Fail: get hlos number of ce hw instance\n"); + rc = -EINVAL; + goto out; + } + } else { + hlos_num_ce_hw_instances = 1; + } + + if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) { + pr_err("Fail: hlos number of ce hw instance exceeds %d\n", + MAX_CE_PIPE_PAIR_PER_UNIT); + rc = -EINVAL; + goto out; + } + + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance, + hlos_num_ce_hw_instances)) { + pr_err("Fail: get hlos ce hw instance info\n"); + rc = -EINVAL; + goto out; + } + + if (qseecom.support_fde) { + pce_info_use = qseecom.ce_info.fde = + kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL); + if (!pce_info_use) { + rc = -ENOMEM; + goto out; + } + /* by default for old db */ + qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT; + pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT; + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE; + pce_info_use->ce_pipe_entry = NULL; + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,disk-encrypt-pipe-pair", + &disk_encrypt_pipe)) { + pr_err("Fail to get FDE pipe information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("disk-encrypt-pipe-pair=0x%x\n", + disk_encrypt_pipe); + } + entry = pce_info_use->num_ce_pipe_entries = + hlos_num_ce_hw_instances; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + rc = -ENOMEM; + goto out; + } + for (i = 0; i < entry; i++) { + pce_entry->ce_num = hlos_ce_hw_instance[i]; + pce_entry->ce_pipe_pair = disk_encrypt_pipe; + pce_entry->valid = 1; + pce_entry++; + } + } else { + pr_warn("Device does not support FDE\n"); + disk_encrypt_pipe = 0xff; + } + if (qseecom.support_pfe) { + pce_info_use = qseecom.ce_info.pfe = + kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL); + if (!pce_info_use) { + rc = -ENOMEM; + goto out; + } + /* by default for old db */ + qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT; + pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT; + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE; + pce_info_use->ce_pipe_entry = NULL; + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,file-encrypt-pipe-pair", + 
&file_encrypt_pipe)) { + pr_err("Fail to get PFE pipe information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("file-encrypt-pipe-pair=0x%x\n", + file_encrypt_pipe); + } + entry = pce_info_use->num_ce_pipe_entries = + hlos_num_ce_hw_instances; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + rc = -ENOMEM; + goto out; + } + for (i = 0; i < entry; i++) { + pce_entry->ce_num = hlos_ce_hw_instance[i]; + pce_entry->ce_pipe_pair = file_encrypt_pipe; + pce_entry->valid = 1; + pce_entry++; + } + } else { + pr_warn("Device does not support PFE\n"); + file_encrypt_pipe = 0xff; + } + +out1: + qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance; + qseecom.ce_drv.instance = hlos_ce_hw_instance[0]; +out: + if (rc) { + if (qseecom.ce_info.fde) { + pce_info_use = qseecom.ce_info.fde; + for (i = 0; i < qseecom.ce_info.num_fde; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.fde); + qseecom.ce_info.fde = NULL; + if (qseecom.ce_info.pfe) { + pce_info_use = qseecom.ce_info.pfe; + for (i = 0; i < qseecom.ce_info.num_pfe; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.pfe); + qseecom.ce_info.pfe = NULL; + } + kfree(unit_tbl); + kfree(pfde_tbl); + return rc; +} + +static int qseecom_get_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 0; + int i; + unsigned int entries; + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + bool found = false; + struct qseecom_ce_pipe_entry *pce_entry; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + pce_info_use = NULL; + for (i = 0; i < total; i++) { + if (!p->alloc) + pce_info_use = p; + else if (!memcmp(p->handle, pinfo->handle, + MAX_CE_INFO_HANDLE_SIZE)) { + pce_info_use = p; + found = true; + break; + } + p++; + } + + if (pce_info_use == NULL) + return -EBUSY; + + pinfo->unit_num = pce_info_use->unit_num; + if (!pce_info_use->alloc) { + pce_info_use->alloc = true; + memcpy(pce_info_use->handle, + pinfo->handle, MAX_CE_INFO_HANDLE_SIZE); + } + if (pce_info_use->num_ce_pipe_entries > + MAX_CE_PIPE_PAIR_PER_UNIT) + entries = MAX_CE_PIPE_PAIR_PER_UNIT; + else + entries = pce_info_use->num_ce_pipe_entries; + pinfo->num_ce_pipe_entries = entries; + pce_entry = pce_info_use->ce_pipe_entry; + for (i = 0; i < entries; i++, pce_entry++) + pinfo->ce_pipe_entry[i] = *pce_entry; + for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; + + if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + } + 
return ret; +} + +static int qseecom_free_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 0; + struct qseecom_ce_info_use *p; + int total = 0; + int i; + bool found = false; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) + return ret; + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + for (i = 0; i < total; i++) { + if (p->alloc && + !memcmp(p->handle, pinfo->handle, + MAX_CE_INFO_HANDLE_SIZE)) { + memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE); + p->alloc = false; + found = true; + break; + } + p++; + } + return ret; +} + +static int qseecom_query_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 0; + int i; + unsigned int entries; + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + bool found = false; + struct qseecom_ce_pipe_entry *pce_entry; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) + return ret; + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + pce_info_use = NULL; + pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM; + pinfo->num_ce_pipe_entries = 0; + for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; + + for (i = 0; i < total; i++) { + + if (p->alloc && !memcmp(p->handle, + pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) { + pce_info_use = p; + found = true; + break; + } + p++; + } + if (!pce_info_use) + goto out; + pinfo->unit_num = pce_info_use->unit_num; + if (pce_info_use->num_ce_pipe_entries > + MAX_CE_PIPE_PAIR_PER_UNIT) + entries = MAX_CE_PIPE_PAIR_PER_UNIT; + else + entries = pce_info_use->num_ce_pipe_entries; + pinfo->num_ce_pipe_entries = entries; + pce_entry = pce_info_use->ce_pipe_entry; + for (i = 0; i < entries; i++, pce_entry++) + pinfo->ce_pipe_entry[i] = *pce_entry; + for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; +out: + if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + } + return ret; +} + +/* + * Check whitelist feature, and if TZ feature version is < 1.0.0, + * then whitelist feature is not supported. 
+ */ +#define GET_FEAT_VERSION_CMD 3 +static int qseecom_check_whitelist_feature(void) +{ + struct qseecom_scm_desc desc = {0}; + int version = 0; + int ret = 0; + + desc.args[0] = FEATURE_ID_WHITELIST; + desc.arginfo = SCM_ARGS(1); + mutex_lock(&app_access_lock); + ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO, + GET_FEAT_VERSION_CMD), &desc); + mutex_unlock(&app_access_lock); + if (!ret) + version = desc.ret[0]; + + return version >= MAKE_WHITELIST_VERSION(1, 0, 0); +} + +static int qseecom_init_clk(void) +{ + int rc; + + if (qseecom.no_clock_support) + return 0; + + rc = __qseecom_init_clk(CLK_QSEE); + if (rc) + return rc; + + if ((qseecom.qsee.instance != qseecom.ce_drv.instance) && + (qseecom.support_pfe || qseecom.support_fde)) { + rc = __qseecom_init_clk(CLK_CE_DRV); + if (rc) { + __qseecom_deinit_clk(CLK_QSEE); + return rc; + } + } else { + qseecom.ce_drv.ce_core_clk = qseecom.qsee.ce_core_clk; + qseecom.ce_drv.ce_clk = qseecom.qsee.ce_clk; + qseecom.ce_drv.ce_core_src_clk = qseecom.qsee.ce_core_src_clk; + qseecom.ce_drv.ce_bus_clk = qseecom.qsee.ce_bus_clk; + } + + return rc; +} + +static void qseecom_deinit_clk(void) +{ + if (qseecom.no_clock_support) + return; + __qseecom_deinit_clk(CLK_QSEE); + if ((qseecom.qsee.instance != qseecom.ce_drv.instance) && + (qseecom.support_pfe || qseecom.support_fde)) + __qseecom_deinit_clk(CLK_CE_DRV); +} + +static int qseecom_init_bus(struct platform_device *pdev) +{ + int ret = 0; + + if (!qseecom.support_bus_scaling) + return 0; + + if (qseecom.no_clock_support) { + pr_err("Can not support bus_scalling if no clock support\n"); + return -EINVAL; + } + + timer_setup(&(qseecom.bw_scale_down_timer), + qseecom_scale_bus_bandwidth_timer_callback, 0); + INIT_WORK(&qseecom.bw_inactive_req_ws, + qseecom_bw_inactive_req_work); + qseecom.timer_running = false; + qseecom.icc_path = of_icc_get(&pdev->dev, "data_path"); + if (IS_ERR(qseecom.icc_path)) { + ret = PTR_ERR(qseecom.icc_path); + if (ret != -EPROBE_DEFER) + pr_err("Unable to get Interconnect path\n"); + return ret; + } + return 0; +} + +static void qseecom_deinit_bus(void) +{ + if (!qseecom.support_bus_scaling || qseecom.no_clock_support) + return; + qseecom_bus_scale_update_request(qseecom.qsee_perf_client, 0); + icc_put(qseecom.icc_path); + cancel_work_sync(&qseecom.bw_inactive_req_ws); + del_timer_sync(&qseecom.bw_scale_down_timer); +} + +static int qseecom_send_app_region(struct platform_device *pdev) +{ + struct resource *resource = NULL; + struct qsee_apps_region_info_64bit_ireq req_64bit; + struct qseecom_command_scm_resp resp; + void *cmd_buf = NULL; + size_t cmd_len; + int rc = 0; + + if (qseecom.qsee_version < QSEE_VERSION_02 || + qseecom.is_apps_region_protected || + qseecom.appsbl_qseecom_support) + return 0; + + resource = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "secapp-region"); + if (!resource) { + pr_err("Fail to get secure app region info\n"); + return -ENOMEM; + } + + req_64bit.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION; + req_64bit.addr = resource->start; + req_64bit.size = resource_size(resource); + cmd_buf = (void *)&req_64bit; + cmd_len = sizeof(struct qsee_apps_region_info_64bit_ireq); + pr_warn("secure app region addr=0x%llx size=0x%x\n", + req_64bit.addr, req_64bit.size); + + rc = __qseecom_enable_clk(CLK_QSEE); + if (rc) { + pr_err("CLK_QSEE enabling failed (%d)\n", rc); + return rc; + } + mutex_lock(&app_access_lock); + rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + mutex_unlock(&app_access_lock); + 
__qseecom_disable_clk(CLK_QSEE); + if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) { + pr_err("send secapp reg fail %d resp.res %d\n", + rc, resp.result); + return -EINVAL; + } + return rc; +} + +static void qseecom_release_ce_data(void) +{ + int i; + struct qseecom_ce_info_use *pce_info_use = NULL; + + if (qseecom.ce_info.fde) { + pce_info_use = qseecom.ce_info.fde; + for (i = 0; i < qseecom.ce_info.num_fde; i++) { + kfree_sensitive(pce_info_use->ce_pipe_entry); + pce_info_use++; + } + kfree(qseecom.ce_info.fde); + } + if (qseecom.ce_info.pfe) { + pce_info_use = qseecom.ce_info.pfe; + for (i = 0; i < qseecom.ce_info.num_pfe; i++) { + kfree_sensitive(pce_info_use->ce_pipe_entry); + pce_info_use++; + } + kfree(qseecom.ce_info.pfe); + } +} + +static int qseecom_init_dev(struct platform_device *pdev) +{ + int rc = 0; + + rc = alloc_chrdev_region(&qseecom.qseecom_device_no, + 0, 1, QSEECOM_DEV); + if (rc < 0) { + pr_err("alloc_chrdev_region failed %d\n", rc); + return rc; + } + qseecom.driver_class = class_create(THIS_MODULE, QSEECOM_DEV); + if (IS_ERR(qseecom.driver_class)) { + rc = PTR_ERR(qseecom.driver_class); + pr_err("class_create failed %x\n", rc); + goto exit_unreg_chrdev_region; + } + qseecom.pdev = device_create(qseecom.driver_class, NULL, + qseecom.qseecom_device_no, NULL, + QSEECOM_DEV); + if (IS_ERR(qseecom.pdev)) { + pr_err("class_device_create failed %d\n", rc); + rc = PTR_ERR(qseecom.pdev); + goto exit_destroy_class; + } + cdev_init(&qseecom.cdev, &qseecom_fops); + qseecom.cdev.owner = THIS_MODULE; + + rc = cdev_add(&qseecom.cdev, + MKDEV(MAJOR(qseecom.qseecom_device_no), 0), 1); + if (rc < 0) { + pr_err("cdev_add failed %d\n", rc); + goto exit_destroy_device; + } + qseecom.dev = &pdev->dev; + rc = dma_set_mask(qseecom.dev, DMA_BIT_MASK(64)); + if (rc) { + pr_err("qseecom failed to set dma mask %d\n", rc); + goto exit_del_cdev; + } + if (!qseecom.dev->dma_parms) { + qseecom.dev->dma_parms = + kzalloc(sizeof(*qseecom.dev->dma_parms), GFP_KERNEL); + if (!qseecom.dev->dma_parms) { + rc = -ENOMEM; + goto exit_del_cdev; + } + } + dma_set_max_seg_size(qseecom.dev, DMA_BIT_MASK(32)); + rc = of_reserved_mem_device_init_by_idx(&pdev->dev, + (&pdev->dev)->of_node, 0); + if (rc) { + pr_err("Failed to initialize reserved mem, ret %d\n", rc); + goto exit_del_cdev; + } + return 0; + +exit_del_cdev: + cdev_del(&qseecom.cdev); +exit_destroy_device: + device_destroy(qseecom.driver_class, qseecom.qseecom_device_no); +exit_destroy_class: + class_destroy(qseecom.driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(qseecom.qseecom_device_no, 1); + + return rc; +} + +static void qseecom_deinit_dev(void) +{ + kfree(qseecom.dev->dma_parms); + qseecom.dev->dma_parms = NULL; + cdev_del(&qseecom.cdev); + device_destroy(qseecom.driver_class, qseecom.qseecom_device_no); + class_destroy(qseecom.driver_class); + unregister_chrdev_region(qseecom.qseecom_device_no, 1); +} + +static int qseecom_init_control(void) +{ + uint32_t feature = 10; + struct qseecom_command_scm_resp resp; + int rc = 0; + + qseecom.qsee_version = QSEEE_VERSION_00; + mutex_lock(&app_access_lock); + rc = qseecom_scm_call(6, 3, &feature, sizeof(feature), + &resp, sizeof(resp)); + mutex_unlock(&app_access_lock); + pr_info("qseecom.qsee_version = 0x%x\n", resp.result); + if (rc) { + pr_err("Failed to get QSEE version info %d\n", rc); + return rc; + } + qseecom.qsee_version = resp.result; + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY); + init_waitqueue_head(&qseecom.app_block_wq); + qseecom.whitelist_support = 
true; + INIT_LIST_HEAD(&qseecom.registered_listener_list_head); + INIT_LIST_HEAD(&qseecom.registered_app_list_head); + spin_lock_init(&qseecom.registered_app_list_lock); + INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head); + INIT_LIST_HEAD(&qseecom.registered_kclient_list_head); + spin_lock_init(&qseecom.registered_kclient_list_lock); + init_waitqueue_head(&qseecom.send_resp_wq); + init_waitqueue_head(&qseecom.register_lsnr_pending_wq); + init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq); + INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head); + init_waitqueue_head(&qseecom.unload_app_kthread_wq); + qseecom.send_resp_flag = 0; + qseecom.qseos_version = QSEOS_VERSION_14; + qseecom.commonlib_loaded = false; + qseecom.commonlib64_loaded = false; + qseecom.whitelist_support = qseecom_check_whitelist_feature(); + + return rc; +} + +static int qseecom_parse_dt(struct platform_device *pdev) +{ + if (!pdev->dev.of_node) { + pr_err("NULL of_node\n"); + return -ENODEV; + } + qseecom.pdev->of_node = pdev->dev.of_node; + qseecom.support_bus_scaling = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-bus-scaling"); + qseecom.appsbl_qseecom_support = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,appsbl-qseecom-support"); + qseecom.commonlib64_loaded = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,commonlib64-loaded-by-uefi"); + qseecom.fde_key_size = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,fde-key-size"); + qseecom.no_clock_support = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,no-clock-support"); + qseecom.enable_key_wrap_in_ks = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,enable-key-wrap-in-ks"); + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,qsee-reentrancy-support", + &qseecom.qsee_reentrancy_support)) { + pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n"); + qseecom.qsee_reentrancy_support = 0; + } + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,ce-opp-freq", &qseecom.ce_opp_freq_hz)) { + pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n"); + qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ; + } + + /* + * By default, appsbl only loads cmnlib. If OEM changes appsbl to + * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin, + * Pls add "qseecom.commonlib64_loaded = true" here too. 
+ */ + if (qseecom.is_apps_region_protected || + qseecom.appsbl_qseecom_support) + qseecom.commonlib_loaded = true; + + return 0; +} + +static int qseecom_create_kthreads(void) +{ + int rc = 0; + + qseecom.unregister_lsnr_kthread_task = kthread_run( + __qseecom_unregister_listener_kthread_func, + NULL, "qseecom-unreg-lsnr"); + if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) { + rc = PTR_ERR(qseecom.unregister_lsnr_kthread_task); + pr_err("fail to create kthread to unreg lsnr, rc = %x\n", rc); + return rc; + } + atomic_set(&qseecom.unregister_lsnr_kthread_state, + LSNR_UNREG_KT_SLEEP); + + /*create a kthread to process pending ta unloading task */ + qseecom.unload_app_kthread_task = kthread_run( + __qseecom_unload_app_kthread_func, + NULL, "qseecom-unload-ta"); + if (IS_ERR(qseecom.unload_app_kthread_task)) { + rc = PTR_ERR(qseecom.unload_app_kthread_task); + pr_err("failed to create kthread to unload ta, rc = %x\n", rc); + kthread_stop(qseecom.unregister_lsnr_kthread_task); + return rc; + } + atomic_set(&qseecom.unload_app_kthread_state, + UNLOAD_APP_KT_SLEEP); + return 0; +} + +static int qseecom_register_heap_shmbridge(struct platform_device *pdev, + char *heap_mem_region_name, + uint64_t *handle) +{ + phys_addr_t heap_pa = 0; + size_t heap_size = 0; + struct device_node *node = NULL; + struct reserved_mem *rmem = NULL; + uint32_t ns_vmids[] = {VMID_HLOS}; + uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE}; + + node = of_parse_phandle(pdev->dev.of_node, heap_mem_region_name, 0); + if (!node) { + pr_err("unable to parse memory-region of heap %d\n", heap_mem_region_name); + return -EINVAL; + } + rmem = of_reserved_mem_lookup(node); + if (!rmem) { + pr_err("unable to acquire memory-region of heap %d\n", heap_mem_region_name); + return -EINVAL; + } + + heap_pa = rmem->base; + heap_size = (size_t)rmem->size; + + pr_debug("get heap %d info: shmbridge created\n", heap_mem_region_name); + return qtee_shmbridge_register(heap_pa, + heap_size, ns_vmids, ns_vm_perms, 1, + PERM_READ | PERM_WRITE, handle); +} + +static int qseecom_register_shmbridge(struct platform_device *pdev) +{ + int ret = 0; + if (!qtee_shmbridge_is_enabled()) + return 0; + ret = qseecom_register_heap_shmbridge(pdev, "qseecom_ta_mem", + &qseecom.ta_bridge_handle); + if (ret) + return ret; + ret = qseecom_register_heap_shmbridge(pdev, "qseecom_mem", + &qseecom.qseecom_bridge_handle); + if (ret) { + qtee_shmbridge_deregister(qseecom.ta_bridge_handle); + return ret; + } + ret = qseecom_register_heap_shmbridge(pdev, "user_contig_mem", + &qseecom.user_contig_bridge_handle); + if (ret) { + qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle); + qtee_shmbridge_deregister(qseecom.ta_bridge_handle); + return ret; + } + return 0; +} + +static void qseecom_deregister_shmbridge(void) +{ + qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle); + qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle); + qtee_shmbridge_deregister(qseecom.ta_bridge_handle); +} + +static int qseecom_probe(struct platform_device *pdev) +{ + int rc; + + rc = qseecom_register_shmbridge(pdev); + if (rc) + return rc; + rc = qseecom_init_dev(pdev); + if (rc) + goto exit_unregister_bridge; + rc = qseecom_init_control(); + if (rc) + goto exit_deinit_dev; + rc = qseecom_parse_dt(pdev); + if (rc) + goto exit_deinit_dev; + rc = qseecom_retrieve_ce_data(pdev); + if (rc) + goto exit_deinit_dev; + rc = qseecom_init_clk(); + if (rc) + goto exit_release_ce_data; + rc = qseecom_init_bus(pdev); + if (rc) + goto exit_deinit_clock; + rc = 
qseecom_send_app_region(pdev); + if (rc) + goto exit_deinit_bus; + rc = qseecom_create_kthreads(); + if (rc) + goto exit_deinit_bus; + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY); + return 0; + +exit_deinit_bus: + qseecom_deinit_bus(); +exit_deinit_clock: + qseecom_deinit_clk(); +exit_release_ce_data: + qseecom_release_ce_data(); +exit_deinit_dev: + qseecom_deinit_dev(); +exit_unregister_bridge: + qseecom_deregister_shmbridge(); + return rc; +} + + +static int qseecom_remove(struct platform_device *pdev) +{ + struct qseecom_registered_kclient_list *kclient = NULL; + struct qseecom_registered_kclient_list *kclient_tmp = NULL; + unsigned long flags = 0; + int ret = 0; + + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY); + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + + list_for_each_entry_safe(kclient, kclient_tmp, + &qseecom.registered_kclient_list_head, list) { + + /* Break the loop if client handle is NULL */ + if (!kclient->handle) { + list_del(&kclient->list); + kfree_sensitive(kclient); + break; + } + + list_del(&kclient->list); + mutex_lock(&app_access_lock); + ret = qseecom_unload_app(kclient->handle->dev, false); + mutex_unlock(&app_access_lock); + if (!ret) { + kfree_sensitive(kclient->handle->dev); + kfree_sensitive(kclient->handle); + kfree_sensitive(kclient); + } + } + + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + + if (qseecom.qseos_version > QSEEE_VERSION_00) + qseecom_unload_commonlib_image(); + + qseecom_deregister_shmbridge(); + kthread_stop(qseecom.unload_app_kthread_task); + kthread_stop(qseecom.unregister_lsnr_kthread_task); + qseecom_deinit_bus(); + qseecom_deinit_clk(); + qseecom_release_ce_data(); + qseecom_deinit_dev(); + return ret; +} + +static int qseecom_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND); + if (qseecom.no_clock_support) + return 0; + + mutex_lock(&qsee_bw_mutex); + mutex_lock(&clk_access_lock); + + if (qseecom.current_mode != INACTIVE) { + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, INACTIVE); + if (ret) + pr_err("Fail to scale down bus\n"); + else + qseecom.current_mode = INACTIVE; + } + + if (qclk->clk_access_cnt) { + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); + if (qclk->ce_bus_clk != NULL) + clk_disable_unprepare(qclk->ce_bus_clk); + } + + del_timer_sync(&(qseecom.bw_scale_down_timer)); + qseecom.timer_running = false; + + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + cancel_work_sync(&qseecom.bw_inactive_req_ws); + + return 0; +} + +static int qseecom_resume(struct platform_device *pdev) +{ + int mode = 0; + int ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + if (qseecom.no_clock_support) + goto exit; + + mutex_lock(&qsee_bw_mutex); + mutex_lock(&clk_access_lock); + if (qseecom.cumulative_mode >= HIGH) + mode = HIGH; + else + mode = qseecom.cumulative_mode; + + if (qseecom.cumulative_mode != INACTIVE) { + ret = qseecom_bus_scale_update_request( + qseecom.qsee_perf_client, mode); + if (ret) + pr_err("Fail to scale up bus to %d\n", mode); + else + qseecom.current_mode = mode; + } + + if (qclk->clk_access_cnt) { + if (qclk->ce_core_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_core_clk); + if (ret) { + pr_err("Unable to enable/prep CE core clk\n"); + qclk->clk_access_cnt = 
0; + goto err; + } + } + if (qclk->ce_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_clk); + if (ret) { + pr_err("Unable to enable/prep CE iface clk\n"); + qclk->clk_access_cnt = 0; + goto ce_clk_err; + } + } + if (qclk->ce_bus_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_bus_clk); + if (ret) { + pr_err("Unable to enable/prep CE bus clk\n"); + qclk->clk_access_cnt = 0; + goto ce_bus_clk_err; + } + } + } + + if (qclk->clk_access_cnt || qseecom.cumulative_mode) { + qseecom.bw_scale_down_timer.expires = jiffies + + msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + mod_timer(&(qseecom.bw_scale_down_timer), + qseecom.bw_scale_down_timer.expires); + qseecom.timer_running = true; + } + + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + goto exit; + +ce_bus_clk_err: + if (qclk->ce_clk) + clk_disable_unprepare(qclk->ce_clk); +ce_clk_err: + if (qclk->ce_core_clk) + clk_disable_unprepare(qclk->ce_core_clk); +err: + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + ret = -EIO; +exit: + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY); + return ret; +} + +static const struct of_device_id qseecom_match[] = { + { + .compatible = "qcom,qseecom", + }, + {} +}; + +static struct platform_driver qseecom_plat_driver = { + .probe = qseecom_probe, + .remove = qseecom_remove, + .suspend = qseecom_suspend, + .resume = qseecom_resume, + .driver = { + .name = "qseecom", + .of_match_table = qseecom_match, + }, +}; + +static int qseecom_init(void) +{ + return platform_driver_register(&qseecom_plat_driver); +} + +static void qseecom_exit(void) +{ + platform_driver_unregister(&qseecom_plat_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator"); + +module_init(qseecom_init); +module_exit(qseecom_exit); diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 7b15db3408..0bd5f828a9 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -1,12 +1,16 @@ #Build ssg kernel driver -PRODUCT_PACKAGES += smcinvoke_dlkm.ko PRODUCT_PACKAGES += tz_log_dlkm.ko PRODUCT_PACKAGES += qcedev-mod_dlkm.ko PRODUCT_PACKAGES += qce50_dlkm.ko PRODUCT_PACKAGES += qcrypto-msm_dlkm.ko PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko PRODUCT_PACKAGES += qrng_dlkm.ko +ifeq ($(TARGET_BOARD_AUTO),true) +PRODUCT_PACKAGES += qseecom_dlkm.ko +else +PRODUCT_PACKAGES += smcinvoke_dlkm.ko +endif #TARGET_BOARD_AUTO diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index de877a7fb3..02aba51998 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -1,8 +1,12 @@ -BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \ - $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ $(KERNEL_MODULES_OUT)/qrng_dlkm.ko \ +ifeq ($(TARGET_BOARD_AUTO),true) +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +else +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko +endif From 4f96dd41ec7c25ac0e49c40c598257f865fc63ea Mon Sep 17 00:00:00 2001 From: Sonal Aggarwal Date: Mon, 21 Feb 2022 15:45:54 +0530 Subject: [PATCH 026/202] securemsm-kernel : Add new functionality in HLOS for TZ to sleep for certain amount of time. 
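For illustration only, a minimal sketch of how such a one-buffer-in callback request can be validated and decoded on the HLOS side. It reuses the field names that appear in the smcinvoke change below (msg->hdr.counts, msg->args[0].b.offset/size); the helper name and the explicit size check are assumptions added for the example, not part of this patch.

/* Hypothetical helper: validate and read the 32-bit sleep time (ms)
 * carried in the single buffer-in argument of a TZ callback request.
 */
static int32_t example_read_sleep_arg(void *buf, size_t buf_len, uint32_t *ms)
{
    struct smcinvoke_tzcb_req *msg = buf;

    /* Expect exactly one buffer-in argument and nothing else. */
    if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0))
        return OBJECT_ERROR_INVALID;

    /* The argument must lie inside the request buffer and be large
     * enough to hold the 32-bit millisecond value.
     */
    if (buf_len - msg->args[0].b.offset < msg->args[0].b.size ||
        msg->args[0].b.size < sizeof(uint32_t))
        return OBJECT_ERROR_INVALID;

    *ms = *(uint32_t *)(buf + msg->args[0].b.offset);
    return OBJECT_OK;
}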
Change-Id: I6352bbe201ffcf81fde6ac7fc65e6f8eaeb0c64e --- smcinvoke/smcinvoke.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 95798de9a9..6bf13bf1fb 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -990,6 +990,23 @@ out: return ret; } +static int32_t smcinvoke_sleep(void *buf, size_t buf_len) +{ + struct smcinvoke_tzcb_req *msg = buf; + uint32_t sleepTimeMs_val = 0; + + if (msg->hdr.counts != OBJECT_COUNTS_PACK(1, 0, 0, 0) || + (buf_len - msg->args[0].b.offset < msg->args[0].b.size)) { + pr_err("Invalid counts received for sleeping in hlos\n"); + return OBJECT_ERROR_INVALID; + } + + /* Time in milliseconds is expected from tz */ + sleepTimeMs_val = *((uint32_t *)(buf + msg->args[0].b.offset)); + msleep(sleepTimeMs_val); + return OBJECT_OK; +} + static void process_kernel_obj(void *buf, size_t buf_len) { struct smcinvoke_tzcb_req *cb_req = buf; @@ -1001,6 +1018,9 @@ static void process_kernel_obj(void *buf, size_t buf_len) case OBJECT_OP_YIELD: cb_req->result = OBJECT_OK; break; + case OBJECT_OP_SLEEP: + cb_req->result = smcinvoke_sleep(buf, buf_len); + break; default: pr_err(" invalid operation for tz kernel object\n"); cb_req->result = OBJECT_ERROR_INVALID; From ce09152acc6ebcc53874fabc1e15c6f703b5b870 Mon Sep 17 00:00:00 2001 From: Jeevan Shriram Date: Tue, 26 Apr 2022 14:38:10 -0700 Subject: [PATCH 027/202] smcinvoke: Add MODULE_IMPORT_NS for smcinvoke driver Add MODULE_IMPORT_NS for the smcinvoke driver so that it is treated as a filesystem and not a driver. Change-Id: I839b972f68f45f184f6a46de6779ac6adace9d38 --- smcinvoke/smcinvoke.c | 1 + 1 file changed, 1 insertion(+) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 6bf13bf1fb..d05a8362a4 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -2505,3 +2505,4 @@ module_exit(smcinvoke_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("SMC Invoke driver"); +MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver); From 0d3eabb246a8350b1d01325d8657d27bc9c37d90 Mon Sep 17 00:00:00 2001 From: Indranil Pradhan Date: Fri, 13 May 2022 20:30:47 +0530 Subject: [PATCH 028/202] securemsm-kernel: Removal of export error and unused variable. Disabling the export error and commenting out the unused variable, which will be resolved with the correct export symbol in the future. Test: Compile tested for complete apps build.
Change-Id: I51c45b522de1437d341c7b3fce0aaa82fd233e55 --- qseecom/qseecom.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/qseecom/qseecom.c b/qseecom/qseecom.c index ffa14cdcc3..cfeb6dff7f 100644 --- a/qseecom/qseecom.c +++ b/qseecom/qseecom.c @@ -2086,7 +2086,6 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, = {0}; struct qseecom_registered_listener_list *ptr_svc = NULL; sigset_t new_sigset; - sigset_t old_sigset; uint32_t status; void *cmd_buf = NULL; size_t cmd_len; @@ -2153,8 +2152,6 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, /* initialize the new signal mask with all signals*/ sigfillset(&new_sigset); /* block all signals */ - sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); - mutex_unlock(&listener_access_lock); do { /* @@ -2178,7 +2175,6 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, } while (1); mutex_lock(&listener_access_lock); /* restore signal mask */ - sigprocmask(SIG_SETMASK, &old_sigset, NULL); if (data->abort || ptr_svc->abort) { pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n", data->client.app_id, lstnr, ret); @@ -2292,7 +2288,6 @@ static int __qseecom_process_reentrancy_blocked_on_listener( struct qseecom_command_scm_resp continue_resp; unsigned int session_id; sigset_t new_sigset; - sigset_t old_sigset; unsigned long flags; bool found_app = false; struct qseecom_registered_app_list dummy_app_entry = { {NULL} }; @@ -2352,8 +2347,6 @@ static int __qseecom_process_reentrancy_blocked_on_listener( /* sleep until listener is available */ sigfillset(&new_sigset); - sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); - do { qseecom.app_block_ref_cnt++; ptr_app->app_blocked = true; @@ -2367,9 +2360,6 @@ static int __qseecom_process_reentrancy_blocked_on_listener( ptr_app->app_blocked = false; qseecom.app_block_ref_cnt--; } while (list_ptr->listener_in_use); - - sigprocmask(SIG_SETMASK, &old_sigset, NULL); - ptr_app->blocked_on_listener_id = 0; pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n", resp->data, session_id, data->client.app_id); @@ -2429,7 +2419,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd( = {0}; struct qseecom_registered_listener_list *ptr_svc = NULL; sigset_t new_sigset; - sigset_t old_sigset; uint32_t status; void *cmd_buf = NULL; size_t cmd_len; @@ -2497,8 +2486,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd( sigfillset(&new_sigset); /* block all signals */ - sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); - /* unlock mutex btw waking listener and sleep-wait */ mutex_unlock(&listener_access_lock); mutex_unlock(&app_access_lock); @@ -2516,7 +2503,6 @@ static int __qseecom_reentrancy_process_incomplete_cmd( qseecom.send_resp_flag = 0; /* restore signal mask */ - sigprocmask(SIG_SETMASK, &old_sigset, NULL); if (data->abort || ptr_svc->abort) { pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n", data->client.app_id, lstnr, ret); From 828d87565bd57d682531269d99263dd10d59f165 Mon Sep 17 00:00:00 2001 From: Pavan Bobba Date: Tue, 17 May 2022 14:19:00 +0530 Subject: [PATCH 029/202] smcinvoke: SHM create/delete handling in memory objects simplify handling of shm bridge create and delete Signed-off-by: Pavan Bobba Change-Id: I231d982ff27725bd20efc0080c6871f079e1be2a --- smcinvoke/smcinvoke.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index d05a8362a4..7a8bc7bcb4 100644 --- a/smcinvoke/smcinvoke.c +++ 
b/smcinvoke/smcinvoke.c @@ -264,7 +264,7 @@ struct smcinvoke_mem_obj { uint64_t p_addr; size_t p_addr_len; struct list_head list; - bool bridge_created_by_others; + bool is_smcinvoke_created_shmbridge; uint64_t shmbridge_handle; }; @@ -373,7 +373,7 @@ static uint32_t next_mem_map_obj_id_locked(void) static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) { int ret = 0; - bool is_bridge_created_by_others = mem_obj->bridge_created_by_others; + bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge; struct dma_buf *dmabuf_to_free = mem_obj->dma_buf; uint64_t shmbridge_handle = mem_obj->shmbridge_handle; @@ -382,7 +382,7 @@ static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) mem_obj = NULL; mutex_unlock(&g_smcinvoke_lock); - if (!is_bridge_created_by_others) + if (is_bridge_created) ret = qtee_shmbridge_deregister(shmbridge_handle); if (ret) pr_err("Error:%d delete bridge failed leaking memory 0x%x\n", @@ -848,17 +848,18 @@ static int smcinvoke_create_bridge(struct smcinvoke_mem_obj *mem_obj) ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems, tz_perm, &mem_obj->shmbridge_handle); - if (ret && ret != -EEXIST) { + if (ret == 0) { + /* In case of ret=0/success handle has to be freed in memobj release */ + mem_obj->is_smcinvoke_created_shmbridge = true; + } else if (ret == -EEXIST) { + ret = 0; + goto exit; + } else { pr_err("creation of shm bridge for mem_region_id %d failed ret %d\n", mem_obj->mem_region_id, ret); goto exit; } - if (ret == -EEXIST) { - mem_obj->bridge_created_by_others = true; - ret = 0; - } - trace_smcinvoke_create_bridge(mem_obj->shmbridge_handle, mem_obj->mem_region_id); exit: kfree(perms_list); From d577197ff2438d62ca78437b5384d2c452b87b68 Mon Sep 17 00:00:00 2001 From: Pavan Bobba Date: Fri, 13 May 2022 16:11:17 +0530 Subject: [PATCH 030/202] smcinvoke : file private data validation which is sent by userspace a validation added to check whether retrieved struct smcinvoke_file_data inside the function get_server_id belongs to g_smcinvoke_fops or not. Change-Id: I50bce93ab89759b4fdcb76e41f699d8199771fbd Signed-off-by: Pavan Bobba --- smcinvoke/smcinvoke.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 7a8bc7bcb4..048556f0a1 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -625,15 +625,13 @@ static uint16_t get_server_id(int cb_server_fd) struct smcinvoke_file_data *svr_cxt = NULL; struct file *tmp_filp = fget(cb_server_fd); - if (!tmp_filp) + if (!tmp_filp || !FILE_IS_REMOTE_OBJ(tmp_filp)) return server_id; svr_cxt = tmp_filp->private_data; if (svr_cxt && svr_cxt->context_type == SMCINVOKE_OBJ_TYPE_SERVER) server_id = svr_cxt->server_id; - - if (tmp_filp) - fput(tmp_filp); + fput(tmp_filp); return server_id; } From e6550eb67e7c8c2fbcb26ac1f80971eba1ec879f Mon Sep 17 00:00:00 2001 From: Nicholas Pelham Date: Tue, 26 Apr 2022 10:27:48 -0700 Subject: [PATCH 031/202] securemsm-kernel: Export get_client_env_object Expose basic smcinvoke support for kernel clients. 
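As a usage illustration only: a hedged sketch of how a kernel client might consume the newly exported symbol. Only get_client_env_object() and Object_release() come from this change; the function name, the -EIO mapping and the placeholder for IClientEnv calls are assumptions for the example.

/* Hypothetical kernel client of the exported symbol. */
static int example_use_client_env(void)
{
    struct Object client_env = Object_NULL;
    int32_t ret;

    ret = get_client_env_object(&client_env);
    if (ret) {
        pr_err("get_client_env_object failed, ret %d\n", ret);
        return -EIO;
    }

    /* ... invoke IClientEnv methods on client_env here ... */

    /* Drop the reference once done. */
    Object_release(client_env);
    return 0;
}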
Change-Id: I754ff56c8c20579d5c824170d0e1b61d0a22535c --- smcinvoke/smcinvoke_kernel.c | 3 ++- smcinvoke/smcinvoke_object.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c index c4e764d87b..96fe0af47a 100644 --- a/smcinvoke/smcinvoke_kernel.c +++ b/smcinvoke/smcinvoke_kernel.c @@ -280,7 +280,7 @@ static int get_root_obj(struct Object *rootObj) * Get a client environment using CBOR encoded credentials * with UID of SYSTEM_UID (1000) */ -static int32_t get_client_env_object(struct Object *clientEnvObj) +int32_t get_client_env_object(struct Object *clientEnvObj) { int32_t ret = OBJECT_ERROR; struct Object rootObj = Object_NULL; @@ -310,6 +310,7 @@ static int32_t get_client_env_object(struct Object *clientEnvObj) Object_release(rootObj); return ret; } +EXPORT_SYMBOL(get_client_env_object); static int load_app(struct qseecom_compat_context *cxt, const char *app_name) { diff --git a/smcinvoke/smcinvoke_object.h b/smcinvoke/smcinvoke_object.h index 74005ab22e..27c66da91e 100644 --- a/smcinvoke/smcinvoke_object.h +++ b/smcinvoke/smcinvoke_object.h @@ -194,4 +194,6 @@ int process_invoke_request_from_kernel_client( char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm); +int32_t get_client_env_object(struct Object *clientEnvObj); + #endif /* __SMCINVOKE_OBJECT_H */ From 3f33efa2c97f8b587cc2ebfef133b268368df7be Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 26 May 2022 12:24:51 -0700 Subject: [PATCH 032/202] qcedev: assign pattern info correctly When non-pattern tests are run after pattern tests, there is stale data from previous tests that never get erased, which depending on crypto behavior might cause decrypt discrepancies. Make the change to correctly set the pattern info to the command descriptors. Also, convert all debug logs to pr_info to avoid flooding serial when QCE_DEBUG is enabled. 
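The underlying hazard is the usual stale-state problem: a request struct that is only conditionally populated can carry values from a previous operation into the hardware programming path. Below is a minimal, generic sketch of the zero-initialize-then-populate pattern the fix relies on; the struct and field names are placeholders, not the qce/qcedev API.

struct example_req {
    bool is_pattern_valid;
    uint32_t pattern_info;
    /* ... other fields ... */
};

static void example_build_req(struct example_req *req, bool use_pattern,
                              uint32_t pattern)
{
    /* Zero the whole request first so fields not set on this path
     * (pattern_info when no pattern is used) cannot retain stale data
     * from an earlier request.
     */
    memset(req, 0, sizeof(*req));

    if (use_pattern) {
        req->is_pattern_valid = true;
        req->pattern_info = pattern;
    }
    /* ... populate the remaining fields unconditionally ... */
}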
Change-Id: Ib84f4025263c622d6e51a47fc147856049bd75d4 --- crypto-qti/qce50.c | 31 +++++++++++++++---------------- crypto-qti/qcedev.c | 1 + 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 81d7d2c1be..b44ba16bf7 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -204,12 +204,12 @@ static uint32_t qce_get_config_be(struct qce_device *pce_dev, static void dump_status_regs(unsigned int s1, unsigned int s2,unsigned int s3, unsigned int s4, unsigned int s5,unsigned int s6) { - pr_err("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, s1); - pr_err("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, s2); - pr_err("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, s3); - pr_err("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, s4); - pr_err("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, s5); - pr_err("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, s6); + pr_info("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, s1); + pr_info("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, s2); + pr_info("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, s3); + pr_info("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, s4); + pr_info("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, s5); + pr_info("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, s6); } void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2, @@ -1121,8 +1121,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, if (is_offload_op(creq->offload_op)) { /* pattern info */ pce = cmdlistinfo->pattern_info; - if (creq->is_pattern_valid) - pce->data = creq->pattern_info; + pce->data = creq->pattern_info; /* block offset */ pce = cmdlistinfo->block_offset; @@ -1289,11 +1288,11 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; iovec = pce_sps_data->in_transfer.iovec; - pr_err("==============================================\n"); - pr_err("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); - pr_err("==============================================\n"); + pr_info("==============================================\n"); + pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); + pr_info("==============================================\n"); for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) { - pr_err(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, + pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); if (iovec->flags & cmd_flags) { struct sps_command_element *pced; @@ -1302,7 +1301,7 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) (GET_VIRT_ADDR(iovec->addr)); ents = iovec->size/(sizeof(struct sps_command_element)); for (j = 0; j < ents; j++) { - pr_err(" [%d] [0x%x] 0x%x\n", j, + pr_info(" [%d] [0x%x] 0x%x\n", j, pced->addr, pced->data); pced++; } @@ -1310,9 +1309,9 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) iovec++; } - pr_err("==============================================\n"); - pr_err("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); - pr_err("==============================================\n"); + pr_info("==============================================\n"); + pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); + pr_info("==============================================\n"); iovec = pce_sps_data->out_transfer.iovec; for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) { pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 34c4238c7e..4bc5f59ea2 100644 --- 
a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -517,6 +517,7 @@ static int start_offload_cipher_req(struct qcedev_control *podev, u8 patt_sz = 0, proc_data_sz = 0; int ret = 0; + memset(&creq, 0, sizeof(creq)); /* Start the command on the podev->active_command */ qcedev_areq = podev->active_command; qcedev_areq->cipher_req.cookie = qcedev_areq->handle; From 0dfcefe3d75a6abb8fafbd341304e712ecc8fb94 Mon Sep 17 00:00:00 2001 From: Jeevan Shriram Date: Wed, 25 May 2022 12:49:51 -0700 Subject: [PATCH 033/202] securemsm: Add securemsm module loading during recovery Add support for the securemsm modules to be loaded during recovery, to avoid unknown symbol errors for other dependent drivers. Change-Id: I477b69cc649ed6b40d0f22a695a25a8d674328f2 --- securemsm_kernel_vendor_board.mk | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index 02aba51998..ce426df3c8 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -5,8 +5,15 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ $(KERNEL_MODULES_OUT)/qrng_dlkm.ko \ +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ + $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ + ifeq ($(TARGET_BOARD_AUTO),true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko else BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko +BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko endif From 6d6aad924f3947ca92d5c5978dca26b6abbd181c Mon Sep 17 00:00:00 2001 From: Divya Sharma Date: Mon, 23 May 2022 15:31:12 -0700 Subject: [PATCH 034/202] securemsm-kernel: Enable/Disable DLKM Control DLKM using TARGET_ENABLE_DLKM. During early bring-up, when most of the modules are not ready either due to missing headers or other reasons, it is a good idea to keep it disabled until it is fully ready.
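For context, a hedged sketch of how a board configuration might drive this logic; only TARGET_KERNEL_DLKM_DISABLE and TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE are read by the change, and the file named in the comment is an assumption for illustration.

# Hypothetical board configuration snippet (e.g. a BoardConfig.mk):
# keep all kernel DLKMs disabled during early bring-up ...
TARGET_KERNEL_DLKM_DISABLE := true
# ... but opt the securemsm modules back in once they build cleanly.
TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE := true

With both variables true, ENABLE_SECUREMSM_DLKM evaluates to true and the modules are packaged; with only TARGET_KERNEL_DLKM_DISABLE set, they are skipped; when the disable flag is absent, the modules are built as before.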
Change-Id: I421f7329cd84d53b3233cea96b0e1920de9d39ea --- Android.mk | 19 +++++++++++++------ Kbuild | 2 -- securemsm_kernel_product_board.mk | 11 +++++++++++ securemsm_kernel_vendor_board.mk | 13 +++++++++++++ 4 files changed, 37 insertions(+), 8 deletions(-) diff --git a/Android.mk b/Android.mk index 69f390fe78..50f7e5158d 100644 --- a/Android.mk +++ b/Android.mk @@ -1,9 +1,16 @@ -# Android makefile for audio kernel modules +# Android makefile for securemsm kernel modules + +ENABLE_SECUREMSM_DLKM := false +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) +ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), true) +ENABLE_SECUREMSM_DLKM := true +endif +else +ENABLE_SECUREMSM_DLKM := true +endif + +ifeq ($(ENABLE_SECUREMSM_DLKM), true) -#Target based of taro does not need these DLKM's as they are present as kernel drivers -#But the project is mapped for DEV SP due to dependency on smcinvoke_kernel_headers -#Hence preventing the DLKM's to be part of the taro based DEV SP -ifneq ($(TARGET_BOARD_PLATFORM), taro) LOCAL_PATH := $(call my-dir) DLKM_DIR := $(TOP)/device/qcom/common/dlkm @@ -115,4 +122,4 @@ LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk endif #TARGET_BOARD_AUTO ################################################### -endif +endif #COMPILE_SECUREMSM_DLKM check diff --git a/Kbuild b/Kbuild index 8da36daae8..eec7f9fb40 100644 --- a/Kbuild +++ b/Kbuild @@ -1,4 +1,3 @@ -ifneq ($(TARGET_BOARD_PLATFORM ), taro) LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ -I$(SSG_MODULE_ROOT)/linux/ \ -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h @@ -33,4 +32,3 @@ hdcp_qseecom_dlkm-objs := hdcp/hdcp_qseecom.o obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o qrng_dlkm-objs := qrng/msm_rng.o -endif diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index 0bd5f828a9..a182fb7dcd 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -1,5 +1,15 @@ #Build ssg kernel driver +ENABLE_SECUREMSM_DLKM := false +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) +ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), true) +ENABLE_SECUREMSM_DLKM := true +endif +else +ENABLE_SECUREMSM_DLKM := true +endif +ifeq ($(ENABLE_SECUREMSM_DLKM), true) +PRODUCT_PACKAGES += smcinvoke_dlkm.ko PRODUCT_PACKAGES += tz_log_dlkm.ko PRODUCT_PACKAGES += qcedev-mod_dlkm.ko PRODUCT_PACKAGES += qce50_dlkm.ko @@ -12,5 +22,6 @@ else PRODUCT_PACKAGES += smcinvoke_dlkm.ko endif #TARGET_BOARD_AUTO +endif #ENABLE_SECUREMSM_DLKM diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index ce426df3c8..bb2771a085 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -1,3 +1,15 @@ +ENABLE_SECUREMSM_DLKM := false + +ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true) +ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), true) +ENABLE_SECUREMSM_DLKM := true +endif +else +ENABLE_SECUREMSM_DLKM := true +endif + + +ifeq ($(ENABLE_SECUREMSM_DLKM), true) BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \ $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \ @@ -17,3 +29,4 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko endif +endif #ENABLE_SECUREMSM_DLKM From 995c3731b72490e4f8bf7c20a439bfac268ace69 Mon Sep 17 00:00:00 2001 From: Bruce Levy Date: Thu, 19 May 2022 10:42:00 
-0700 Subject: [PATCH 035/202] smcinvoke: Fix mutex lock and unlock Acquire lock before delete_cb_txn_locked. This API expects the lock is aquired before calling it release the lock afterwards. Also rename the api with_locked to state that api needs to be called on locked mutex. Change-Id: I6384ff60004da90b46904823e399c62c55ea4273 --- smcinvoke/smcinvoke.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 048556f0a1..23497004e5 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -548,7 +548,7 @@ static void release_tzhandles(const int32_t *tzhandles, size_t len) mutex_unlock(&g_smcinvoke_lock); } -static void delete_cb_txn(struct kref *kref) +static void delete_cb_txn_locked(struct kref *kref) { struct smcinvoke_cb_txn *cb_txn = container_of(kref, struct smcinvoke_cb_txn, ref_cnt); @@ -1226,7 +1226,7 @@ out: cb_req->hdr.counts, cb_reqs_inflight); memcpy(buf, cb_req, buf_len); - kref_put(&cb_txn->ref_cnt, delete_cb_txn); + kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked); if (srvr_info) kref_put(&srvr_info->ref_cnt, destroy_cb_server); mutex_unlock(&g_smcinvoke_lock); @@ -1846,7 +1846,9 @@ static long process_accept_req(struct file *filp, unsigned int cmd, cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; cb_txn->state = SMCINVOKE_REQ_PROCESSED; - kref_put(&cb_txn->ref_cnt, delete_cb_txn); + mutex_lock(&g_smcinvoke_lock); + kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked); + mutex_unlock(&g_smcinvoke_lock); wake_up(&server_info->rsp_wait_q); /* * if marshal_out fails, we should let userspace release @@ -1890,14 +1892,16 @@ static long process_accept_req(struct file *filp, unsigned int cmd, pr_err("failed to marshal in the callback request\n"); cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL; cb_txn->state = SMCINVOKE_REQ_PROCESSED; - kref_put(&cb_txn->ref_cnt, delete_cb_txn); + mutex_lock(&g_smcinvoke_lock); + kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked); + mutex_unlock(&g_smcinvoke_lock); wake_up_interruptible(&server_info->rsp_wait_q); continue; } mutex_lock(&g_smcinvoke_lock); hash_add(server_info->responses_table, &cb_txn->hash, cb_txn->txn_id); - kref_put(&cb_txn->ref_cnt, delete_cb_txn); + kref_put(&cb_txn->ref_cnt, delete_cb_txn_locked); mutex_unlock(&g_smcinvoke_lock); trace_process_accept_req_placed(current->pid, current->tgid); From ab140f0f119964ac4afadf57df7787fc6603f177 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Tue, 31 May 2022 22:38:35 -0700 Subject: [PATCH 036/202] qce: stability and performance improvements 1. There is no need to vote for high bandwidth for every ioctl operation, Instead, vote only when a new device node is opened (which translates to a new session from a userspace point of view). 2. Fix the way BAM pipes are reset when there is a crypto operation by initializing it correctly through the SPS BAM framework. 3. Checking crypto status for every request is performance heavy. Remove these checks and read status only on error conditions. 
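
The first point reduces to pairing the bandwidth vote with the file lifetime instead of with each ioctl. A minimal sketch of that pattern follows; qcedev_ce_high_bw_req(), struct qcedev_control and handle->cntl are taken from the patch below, while the surrounding wiring ("sketch_podev", error paths) is illustrative only and assumes the driver's internal definitions from qcedev.c/qcedevi.h:

    /*
     * Sketch only: vote the crypto bandwidth up once per session (one
     * open file descriptor) in open() and drop it in release(), rather
     * than voting around every ioctl.  'sketch_podev' stands in for the
     * real per-device control lookup done in the driver.
     */
    static struct qcedev_control *sketch_podev;

    static int qcedev_open_sketch(struct inode *inode, struct file *file)
    {
            struct qcedev_handle *handle;

            handle = kzalloc(sizeof(*handle), GFP_KERNEL);
            if (!handle)
                    return -ENOMEM;

            handle->cntl = sketch_podev;
            file->private_data = handle;

            qcedev_ce_high_bw_req(sketch_podev, true);  /* one vote per open() */
            return 0;
    }

    static int qcedev_release_sketch(struct inode *inode, struct file *file)
    {
            struct qcedev_handle *handle = file->private_data;

            qcedev_ce_high_bw_req(handle->cntl, false); /* drop it on close */
            kfree(handle);
            return 0;
    }

With this shape, a userspace session that issues thousands of small requests pays for the vote exactly twice (open and close) rather than on every call.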
Change-Id: Ibb3607ecb6919f563b00a9a8cd6f5440a8c3940a --- crypto-qti/qce50.c | 78 ++++++++++++++++++++++++++++++++++++++------- crypto-qti/qcedev.c | 20 ++---------- 2 files changed, 70 insertions(+), 28 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index b44ba16bf7..70265198a7 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -36,7 +36,7 @@ #define CRYPTO_SMMU_IOVA_START 0x10000000 #define CRYPTO_SMMU_IOVA_SIZE 0x40000000 -#define CRYPTO_CONFIG_RESET 0xE001F +#define CRYPTO_CONFIG_RESET 0xE01EF #define MAX_SPS_DESC_FIFO_SIZE 0xfff0 #define QCE_MAX_NUM_DSCR 0x200 #define QCE_SECTOR_SIZE 0x200 @@ -2233,9 +2233,66 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info) static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, bool is_complete); +static int qce_sps_pipe_reset(struct qce_device *pce_dev, int op) +{ + int rc = -1; + struct sps_pipe *sps_pipe_info = NULL; + struct sps_connect *sps_connect_info = NULL; + + /* Reset both the pipe sets in the pipe group */ + sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, + pce_dev->ce_bam_info.dest_pipe_index[op]); + sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, + pce_dev->ce_bam_info.src_pipe_index[op]); + + /* Reconnect to consumer pipe */ + sps_pipe_info = pce_dev->ce_bam_info.consumer[op].pipe; + sps_connect_info = &pce_dev->ce_bam_info.consumer[op].connect; + rc = sps_disconnect(sps_pipe_info); + if (rc) { + pr_err("sps_disconnect() fail pipe=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto exit; + } + memset(sps_connect_info->desc.base, 0x00, + sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto exit; + } + + /* Reconnect to producer pipe */ + sps_pipe_info = pce_dev->ce_bam_info.producer[op].pipe; + sps_connect_info = &pce_dev->ce_bam_info.producer[op].connect; + rc = sps_disconnect(sps_pipe_info); + if (rc) { + pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto exit; + } + memset(sps_connect_info->desc.base, 0x00, + sps_connect_info->desc.size); + rc = sps_connect(sps_pipe_info, sps_connect_info); + if (rc) { + pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n", + (uintptr_t)sps_pipe_info, rc); + goto exit; + } + + /* Register producer callback */ + rc = sps_register_event(sps_pipe_info, + &pce_dev->ce_bam_info.producer[op].event); + if (rc) + pr_err("Producer cb registration failed rc = %d\n", + rc); +exit: + return rc; +} + int qce_manage_timeout(void *handle, int req_info) { - int rc = 0; struct qce_device *pce_dev = (struct qce_device *) handle; struct skcipher_request *areq; struct ce_request_info *preq_info; @@ -2247,17 +2304,16 @@ int qce_manage_timeout(void *handle, int req_info) areq = (struct skcipher_request *) preq_info->areq; pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info, op); - rc = _qce_unlock_other_pipes(pce_dev, req_info); - if (rc) - pr_err("%s: fail unlock other pipes, rc = %d", __func__, rc); + + if (qce_sps_pipe_reset(pce_dev, op)) + pr_err("%s: pipe reset failed\n", __func__); + + if (_qce_unlock_other_pipes(pce_dev, req_info)) + pr_err("%s: fail unlock other pipes\n", __func__); + qce_free_req_info(pce_dev, req_info, true); qce_callback(areq, NULL, NULL, 0); - sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, - pce_dev->ce_bam_info.dest_pipe_index[op]); - sps_pipe_reset(pce_dev->ce_bam_info.bam_handle, - 
pce_dev->ce_bam_info.src_pipe_index[op]); - - return rc; + return 0; } EXPORT_SYMBOL(qce_manage_timeout); diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 4bc5f59ea2..fd8574374f 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -273,6 +273,8 @@ static int qcedev_open(struct inode *inode, struct file *file) handle->cntl = podev; file->private_data = handle; + qcedev_ce_high_bw_req(podev, true); + mutex_init(&handle->registeredbufs.lock); INIT_LIST_HEAD(&handle->registeredbufs.list); return 0; @@ -290,6 +292,7 @@ static int qcedev_release(struct inode *inode, struct file *file) __func__, podev); } + qcedev_ce_high_bw_req(podev, false); if (qcedev_unmap_all_buffers(handle)) pr_err("%s: failed to unmap all ion buffers\n", __func__); @@ -719,7 +722,6 @@ static void qcedev_check_crypto_status( QCEDEV_OFFLOAD_GENERIC_ERROR; return; } - } static int submit_req(struct qcedev_async_req *qcedev_areq, @@ -736,10 +738,6 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, qcedev_areq->err = 0; podev = handle->cntl; - qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); - if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) - return 0; - spin_lock_irqsave(&podev->lock, flags); if (podev->active_command == NULL) { @@ -783,10 +781,6 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, if (ret) qcedev_areq->err = -EIO; - qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); - if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) - return 0; - pstat = &_qcedev_stat; if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) { switch (qcedev_areq->cipher_op_req.op) { @@ -2109,10 +2103,6 @@ long qcedev_ioctl(struct file *file, init_completion(&qcedev_areq->complete); pstat = &_qcedev_stat; - if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ && - cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ) - qcedev_ce_high_bw_req(podev, true); - switch (cmd) { case QCEDEV_IOCTL_ENC_REQ: case QCEDEV_IOCTL_DEC_REQ: @@ -2148,7 +2138,6 @@ long qcedev_ioctl(struct file *file, err = -EFAULT; goto exit_free_qcedev_areq; } - qcedev_areq->op_type = QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER; if (qcedev_check_offload_cipher_params( &qcedev_areq->offload_cipher_op_req, podev)) { @@ -2428,9 +2417,6 @@ long qcedev_ioctl(struct file *file, } exit_free_qcedev_areq: - if (cmd != QCEDEV_IOCTL_MAP_BUF_REQ && - cmd != QCEDEV_IOCTL_UNMAP_BUF_REQ && podev != NULL) - qcedev_ce_high_bw_req(podev, false); kfree(qcedev_areq); return err; } From c7859ce020dbdc9979dedda96b80b98a73af0923 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 23 May 2022 13:43:52 -0700 Subject: [PATCH 037/202] qce: add null pointer checks and correct return vals 1. Currently, each request api sometimes returns 0 even when an error has occured. This will result in unecessary crypto requests when failures are expected. 2. The request callback handlers does not check handles to be NULL before accessing. Fix to add NULL handling. 
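
The NULL-handling half of the change follows one pattern in every completion callback; a condensed sketch, mirroring qcedev_cipher_req_cb() from the diff below with the actual completion work elided, looks like this:

    /*
     * Condensed sketch of the defensive checks added to the completion
     * callbacks: validate every link of the cookie chain before using
     * it.  Type and field names mirror the qcedev driver; the body that
     * normally copies the IV and completes the request is elided.
     */
    void qcedev_cipher_req_cb_sketch(void *cookie, unsigned char *icv,
                                     unsigned char *iv, int ret)
    {
            struct qcedev_cipher_req *areq = cookie;
            struct qcedev_handle *handle;
            struct qcedev_control *podev;

            if (!areq || !areq->cookie)     /* stale or malformed completion */
                    return;

            handle = areq->cookie;
            podev = handle->cntl;
            if (!podev)                     /* device context already gone */
                    return;

            /* only now is podev->active_command safe to dereference */
    }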
Change-Id: Ia44e353bdb75434dfbc0e3ec0582abc0208be6a7 --- crypto-qti/qce50.c | 234 +++++++++++++++++++++++++++++--------------- crypto-qti/qcedev.c | 12 +++ 2 files changed, 169 insertions(+), 77 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 70265198a7..4f37ff23cc 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -5100,52 +5100,65 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr && cmdlistinfo) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + if (pce_dev->support_cmd_dscr && cmdlistinfo) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } if (pce_dev->ce_bam_info.minor_version == 0) { goto bad; } else { - if (q_req->assoclen && (_qce_sps_add_sg_data( - pce_dev, q_req->asg, q_req->assoclen, - &pce_sps_data->in_transfer))) - goto bad; - if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen, + if (q_req->assoclen) { + rc = _qce_sps_add_sg_data(pce_dev, q_req->asg, + q_req->assoclen, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } + rc = _qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen, areq->assoclen, - &pce_sps_data->in_transfer)) + &pce_sps_data->in_transfer); + if (rc) goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir); - if (pce_dev->no_get_around) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + if (pce_dev->no_get_around) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } /* Pass through to ignore associated data*/ - if (_qce_sps_add_data( + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->ignore_buffer), q_req->assoclen, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; - if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len, + rc = _qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len, areq->assoclen, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; /* Pass through to ignore hw_pad (padding of the MAC data) */ - if (_qce_sps_add_data( + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->ignore_buffer), - hw_pad_out, &pce_sps_data->out_transfer)) + hw_pad_out, &pce_sps_data->out_transfer); + if (rc) goto bad; if (pce_dev->no_get_around || totallen_in <= SPS_MAX_PKT_SIZE) { - if (_qce_sps_add_data( + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; } else { @@ -5346,8 +5359,10 @@ int qce_aead_req(void *handle, struct qce_req *q_req) _qce_sps_iovec_count_init(pce_dev, req_info); if (pce_dev->support_cmd_dscr) { - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; } else { rc = _ce_setup_aead_direct(pce_dev, q_req, totallen, areq->assoclen); @@ -5358,25 +5373,28 @@ int qce_aead_req(void *handle, struct qce_req *q_req) preq_info->mode = q_req->mode; if (pce_dev->ce_bam_info.minor_version == 0) { - if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, - &pce_sps_data->in_transfer)) + rc = 
_qce_sps_add_sg_data(pce_dev, areq->src, totallen, + &pce_sps_data->in_transfer); + if (rc) goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, - &pce_sps_data->out_transfer)) + rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen, + &pce_sps_data->out_transfer); + if (rc) goto bad; if (totallen > SPS_MAX_PKT_SIZE) { _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; } else { - if (_qce_sps_add_data(GET_PHYS_ADDR( + rc = _qce_sps_add_data(GET_PHYS_ADDR( pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); @@ -5384,26 +5402,32 @@ int qce_aead_req(void *handle, struct qce_req *q_req) } rc = _qce_sps_transfer(pce_dev, req_info); } else { - if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, - &pce_sps_data->in_transfer)) + rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen, + &pce_sps_data->in_transfer); + if (rc) goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - if (pce_dev->no_get_around) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + if (pce_dev->no_get_around) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } - if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, - &pce_sps_data->out_transfer)) + rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen, + &pce_sps_data->out_transfer); + if (rc) goto bad; if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) { - if (_qce_sps_add_data( + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; } else { @@ -5509,31 +5533,41 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) preq_info->req_len = areq->cryptlen; _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr && cmdlistinfo) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, + if (pce_dev->support_cmd_dscr && cmdlistinfo) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } + rc = _qce_sps_add_data(areq->src->dma_address, areq->cryptlen, &pce_sps_data->in_transfer); - if (_qce_sps_add_data(areq->src->dma_address, areq->cryptlen, - &pce_sps_data->in_transfer)) + if (rc) goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - if (pce_dev->no_get_around) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + if (pce_dev->no_get_around) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } - if (_qce_sps_add_data(areq->dst->dma_address, areq->cryptlen, - &pce_sps_data->out_transfer)) + rc = _qce_sps_add_data(areq->dst->dma_address, areq->cryptlen, + &pce_sps_data->out_transfer); + if (rc) goto bad; if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) { pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; - if (!is_offload_op(c_req->offload_op)) - if (_qce_sps_add_data( + if (!is_offload_op(c_req->offload_op)) { + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->result_dump), 
CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; + } } else { pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; } @@ -5560,6 +5594,7 @@ bad: } qce_free_req_info(pce_dev, req_info, false); + return rc; } EXPORT_SYMBOL(qce_ablk_cipher_req); @@ -5618,29 +5653,40 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr && cmdlistinfo) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); - if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, - &pce_sps_data->in_transfer)) + if (pce_dev->support_cmd_dscr && cmdlistinfo) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } + rc = _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, + &pce_sps_data->in_transfer); + if (rc) goto bad; /* always ensure there is input data. ZLT does not work for bam-ndp */ - if (!areq->nbytes) - _qce_sps_add_data( + if (!areq->nbytes) { + rc = _qce_sps_add_data( GET_PHYS_ADDR(pce_sps_data->ignore_buffer), pce_dev->ce_bam_info.ce_burst_size, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - if (pce_dev->no_get_around) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + if (pce_dev->no_get_around) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } - if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_sps_data->out_transfer); + if (rc) goto bad; if (is_dummy) { @@ -5660,6 +5706,7 @@ bad: preq_info->src_nents, DMA_TO_DEVICE); } qce_free_req_info(pce_dev, req_info, false); + return rc; } EXPORT_SYMBOL(qce_process_sha_req); @@ -5743,26 +5790,37 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + if (pce_dev->support_cmd_dscr) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len, + rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len, &pce_sps_data->in_transfer); + if (rc) + goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; - _qce_sps_add_data((uint32_t)dst, req->data_len, + rc = _qce_sps_add_data((uint32_t)dst, req->data_len, &pce_sps_data->out_transfer); + if (rc) + goto bad; - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); + if (rc) + goto bad; select_mode(pce_dev, preq_info); rc = _qce_sps_transfer(pce_dev, req_info); @@ -5780,6 +5838,7 @@ bad: (req->data_in == req->data_out) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); qce_free_req_info(pce_dev, req_info, false); + return rc; } EXPORT_SYMBOL(qce_f8_req); @@ -5860,25 +5919,35 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + if (pce_dev->support_cmd_dscr) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + goto bad; + } - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total, + rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total, &pce_sps_data->in_transfer); + if (rc) + goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; - _qce_sps_add_data((uint32_t)dst, total, + rc = _qce_sps_add_data((uint32_t)dst, total, &pce_sps_data->out_transfer); + if (rc) + goto bad; - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); + if (rc) + goto bad; select_mode(pce_dev, preq_info); rc = _qce_sps_transfer(pce_dev, req_info); @@ -5894,6 +5963,7 @@ bad: (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); qce_free_req_info(pce_dev, req_info, false); + return rc; } EXPORT_SYMBOL(qce_f8_multi_pkt_req); @@ -5947,21 +6017,30 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, preq_info->req_len = req->msize; _qce_sps_iovec_count_init(pce_dev, req_info); - if (pce_dev->support_cmd_dscr) - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize, + if (pce_dev->support_cmd_dscr) { + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, + cmdlistinfo, &pce_sps_data->in_transfer); + if (rc) + goto bad; + } + rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize, &pce_sps_data->in_transfer); + if (rc) + goto bad; _qce_set_flag(&pce_sps_data->in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, + rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, &pce_sps_data->cmdlistptr.unlock_all_pipes, &pce_sps_data->in_transfer); + if (rc) + goto bad; - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); + if (rc) + goto bad; select_mode(pce_dev, preq_info); rc = _qce_sps_transfer(pce_dev, req_info); @@ -5973,6 +6052,7 @@ bad: dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, req->msize, DMA_TO_DEVICE); qce_free_req_info(pce_dev, req_info, false); + return rc; } EXPORT_SYMBOL(qce_f9_req); diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index fd8574374f..4c7f7ea215 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -355,8 +355,12 @@ void qcedev_sha_req_cb(void *cookie, unsigned char *digest, uint32_t *auth32 = (uint32_t *)authdata; areq = (struct qcedev_sha_req *) cookie; + if (!areq || !areq->cookie) + return; handle = (struct qcedev_handle *) areq->cookie; pdev = handle->cntl; + if (!pdev) + return; if (digest) memcpy(&handle->sha_ctxt.digest[0], digest, 32); @@ -379,8 +383,12 @@ void 
qcedev_cipher_req_cb(void *cookie, unsigned char *icv, struct qcedev_async_req *qcedev_areq; areq = (struct qcedev_cipher_req *) cookie; + if (!areq || !areq->cookie) + return; handle = (struct qcedev_handle *) areq->cookie; podev = handle->cntl; + if (!podev) + return; qcedev_areq = podev->active_command; if (iv) @@ -501,8 +509,12 @@ void qcedev_offload_cipher_req_cb(void *cookie, unsigned char *icv, struct qcedev_async_req *qcedev_areq; areq = (struct qcedev_cipher_req *) cookie; + if (!areq || !areq->cookie) + return; handle = (struct qcedev_handle *) areq->cookie; podev = handle->cntl; + if (!podev) + return; qcedev_areq = podev->active_command; if (iv) From cc0d8159233d0a02494b1cb6b2822e9331eef92d Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 13 Jun 2022 10:17:42 -0700 Subject: [PATCH 038/202] qce: changes for non offload usecases With the offload implementation, there are a couple of issues for non-offload usecases. 1. Set config and offload op parameters correctly for non-cipher usecases. 2. Due to default wait value, there is a crypto operation irrespective of previous errors, fix that. Change-Id: Idba55b3603349b7ad831eccd6ee25c98b1df5de6 --- crypto-qti/qce.h | 1 + crypto-qti/qce50.c | 89 +++++++++++++++++++++++++++++++++++++-------- crypto-qti/qcedev.c | 2 +- 3 files changed, 76 insertions(+), 16 deletions(-) diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h index f7f8e9863c..a0e1e36912 100644 --- a/crypto-qti/qce.h +++ b/crypto-qti/qce.h @@ -107,6 +107,7 @@ enum qce_req_op_enum { /* Offload operation type */ enum qce_offload_op_enum { + QCE_OFFLOAD_NONE = 0, /* kernel pipe */ QCE_OFFLOAD_HLOS_HLOS = 1, QCE_OFFLOAD_HLOS_CPB = 2, QCE_OFFLOAD_CPB_HLOS = 3, diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 4f37ff23cc..1d73ab48b3 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -235,12 +235,16 @@ void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2, } EXPORT_SYMBOL(qce_get_crypto_status); -static void qce_set_offload_config(struct qce_device *pce_dev, - struct qce_req *creq) +static int qce_crypto_config(struct qce_device *pce_dev, + enum qce_offload_op_enum offload_op) { - uint32_t config_be = pce_dev->reg.crypto_cfg_be; + uint32_t config_be = 0; - switch (creq->offload_op) { + switch (offload_op) { + case QCE_OFFLOAD_NONE: + config_be = qce_get_config_be(pce_dev, + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE]); + break; case QCE_OFFLOAD_HLOS_HLOS: config_be = qce_get_config_be(pce_dev, pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS]); @@ -254,13 +258,15 @@ static void qce_set_offload_config(struct qce_device *pce_dev, pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS]); break; default: - break; + pr_err("%s: Valid pipe config not set, offload op = %d\n", + __func__, offload_op); + return -EINVAL; } pce_dev->reg.crypto_cfg_be = config_be; pce_dev->reg.crypto_cfg_le = (config_be | CRYPTO_LITTLE_ENDIAN_MASK); - return; + return 0; } /* @@ -463,6 +469,15 @@ static int _ce_setup_hash(struct qce_device *pce_dev, uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t); uint32_t auth_cfg; + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; + + pce = cmdlistinfo->crypto_cfg; + pce->data = pce_dev->reg.crypto_cfg_be; + + pce = cmdlistinfo->crypto_cfg_le; + pce->data = pce_dev->reg.crypto_cfg_le; + if ((sreq->alg == QCE_HASH_SHA1_HMAC) || (sreq->alg == QCE_HASH_SHA256_HMAC) || (sreq->alg == QCE_HASH_AES_CMAC)) { @@ -673,6 +688,15 @@ static int _ce_setup_aead(struct qce_device *pce_dev, 
struct qce_req *q_req, key_size = q_req->encklen; enck_size_in_word = key_size/sizeof(uint32_t); + if (qce_crypto_config(pce_dev, q_req->offload_op)) + return -EINVAL; + + pce = cmdlistinfo->crypto_cfg; + pce->data = pce_dev->reg.crypto_cfg_be; + + pce = cmdlistinfo->crypto_cfg_le; + pce->data = pce_dev->reg.crypto_cfg_le; + switch (q_req->alg) { case CIPHER_ALG_DES: enciv_in_word = 2; @@ -843,7 +867,8 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, else key_size = creq->encklen; - qce_set_offload_config(pce_dev, creq); + if (qce_crypto_config(pce_dev, creq->offload_op)) + return -EINVAL; pce = cmdlistinfo->crypto_cfg; pce->data = pce_dev->reg.crypto_cfg_be; @@ -1174,6 +1199,15 @@ static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req, break; } + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; + + pce = cmdlistinfo->crypto_cfg; + pce->data = pce_dev->reg.crypto_cfg_be; + + pce = cmdlistinfo->crypto_cfg_le; + pce->data = pce_dev->reg.crypto_cfg_le; + /* write key in CRYPTO_AUTH_IV0-3_REG */ _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE); pce = cmdlistinfo->auth_iv; @@ -1236,6 +1270,16 @@ static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, cfg = pce_dev->reg.encr_cfg_snow3g; break; } + + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; + + pce = cmdlistinfo->crypto_cfg; + pce->data = pce_dev->reg.crypto_cfg_be; + + pce = cmdlistinfo->crypto_cfg_le; + pce->data = pce_dev->reg.crypto_cfg_le; + /* write key */ _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE); pce = cmdlistinfo->encr_key; @@ -1361,6 +1405,8 @@ static int _ce_setup_hash_direct(struct qce_device *pce_dev, /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* @@ -1550,6 +1596,8 @@ static int _ce_setup_aead_direct(struct qce_device *pce_dev, /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); + if (qce_crypto_config(pce_dev, q_req->offload_op)) + return -EINVAL; QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* @@ -1727,7 +1775,8 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev, /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); - qce_set_offload_config(pce_dev, creq); + if (qce_crypto_config(pce_dev, creq->offload_op)) + return -EINVAL; QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, @@ -2063,6 +2112,8 @@ static int _ce_f9_setup_direct(struct qce_device *pce_dev, break; } + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); @@ -2151,6 +2202,8 @@ static int _ce_f8_setup_direct(struct qce_device *pce_dev, /* clear status */ QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG); /* set big endian configuration */ + if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE)) + return -EINVAL; QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase + CRYPTO_CONFIG_REG)); /* write auth seg configuration */ @@ -3218,7 +3271,7 @@ static int qce_sps_init(struct qce_device *pce_dev) pce_dev->ce_bam_info.bam_handle); for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { - if (i == 0 && !(pce_dev->kernel_pipes_support)) + if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support)) 
continue; else if ((i > 0) && !(pce_dev->offload_pipes_support)) break; @@ -3462,7 +3515,7 @@ static void qce_sps_exit(struct qce_device *pce_dev) int i = 0; for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { - if (i == 0 && !(pce_dev->kernel_pipes_support)) + if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support)) continue; else if ((i > 0) && !(pce_dev->offload_pipes_support)) break; @@ -4696,7 +4749,8 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev) static int qce_init_ce_cfg_val(struct qce_device *pce_dev) { - uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index[0]; + uint32_t pipe_pair = + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE]; pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair); @@ -5203,7 +5257,7 @@ static int _qce_suspend(void *handle) return -ENODEV; for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { - if (i == 0 && !(pce_dev->kernel_pipes_support)) + if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support)) continue; else if ((i > 0) && !(pce_dev->offload_pipes_support)) break; @@ -5228,7 +5282,7 @@ static int _qce_resume(void *handle) return -ENODEV; for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) { - if (i == 0 && !(pce_dev->kernel_pipes_support)) + if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support)) continue; else if ((i > 0) && !(pce_dev->offload_pipes_support)) break; @@ -5351,6 +5405,7 @@ int qce_aead_req(void *handle, struct qce_req *q_req) preq_info->qce_cb = q_req->qce_cb; preq_info->dir = q_req->dir; preq_info->asg = NULL; + preq_info->offload_op = QCE_OFFLOAD_NONE; /* setup xfer type for producer callback handling */ preq_info->xfer_type = QCE_XFER_AEAD; @@ -5646,6 +5701,7 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) preq_info->areq = areq; preq_info->qce_cb = sreq->qce_cb; + preq_info->offload_op = QCE_OFFLOAD_NONE; /* setup xfer type for producer callback handling */ preq_info->xfer_type = QCE_XFER_HASHING; @@ -5783,6 +5839,7 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, /* setup for callback, and issue command to sps */ preq_info->areq = cookie; preq_info->qce_cb = qce_cb; + preq_info->offload_op = QCE_OFFLOAD_NONE; /* setup xfer type for producer callback handling */ preq_info->xfer_type = QCE_XFER_F8; @@ -5912,6 +5969,7 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, /* setup for callback, and issue command to sps */ preq_info->areq = cookie; preq_info->qce_cb = qce_cb; + preq_info->offload_op = QCE_OFFLOAD_NONE; /* setup xfer type for producer callback handling */ preq_info->xfer_type = QCE_XFER_F8; @@ -6011,6 +6069,7 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, /* setup for callback, and issue command to sps */ preq_info->areq = cookie; preq_info->qce_cb = qce_cb; + preq_info->offload_op = QCE_OFFLOAD_NONE; /* setup xfer type for producer callback handling */ preq_info->xfer_type = QCE_XFER_F9; @@ -6096,10 +6155,10 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, pce_dev->kernel_pipes_support = true; if (of_property_read_u32((&pdev->dev)->of_node, "qcom,bam-pipe-pair", - &pce_dev->ce_bam_info.pipe_pair_index[0])) { + &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE])) { pr_warn("Kernel pipes not supported.\n"); //Unused pipe, just as failsafe. 
- pce_dev->ce_bam_info.pipe_pair_index[0] = 2; + pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE] = 2; pce_dev->kernel_pipes_support = false; } diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 4c7f7ea215..c155d92883 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -744,7 +744,7 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, int ret = 0; struct qcedev_stat *pstat; int current_req_info = 0; - int wait = 0; + int wait = MAX_CRYPTO_WAIT_TIME; bool print_sts = false; qcedev_areq->err = 0; From e2231d0fe2e2366430cf2e7535008acc165db95c Mon Sep 17 00:00:00 2001 From: Nitin LNU Date: Wed, 11 May 2022 16:25:55 +0530 Subject: [PATCH 039/202] securemsm-kernel: Post process shmbridge delete and object release 1.When the QTEE is busy with a high concurrency, the objects and shared memory bridge will not be released and deleted in time, this will lead to memory leakage issues in the QTEE. 2.To avoid the memory leakage in case of EBUSY, we add 2 exclusive worker threads to postprocess the object release and shmbridge deletion separately. Change-Id: I94c656d191d5098f1c093650e4321e6b5353e45e Signed-off-by: Nitin LNU --- smcinvoke/smcinvoke.c | 350 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 304 insertions(+), 46 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 23497004e5..96c8e98589 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -149,6 +149,17 @@ static DEFINE_MUTEX(g_smcinvoke_lock); #define TAKE_LOCK 1 #define MUTEX_LOCK(x) { if (x) mutex_lock(&g_smcinvoke_lock); } #define MUTEX_UNLOCK(x) { if (x) mutex_unlock(&g_smcinvoke_lock); } + +#define POST_KT_SLEEP 0 +#define POST_KT_WAKEUP 1 +#define MAX_CHAR_NAME 50 + +enum worker_thread_type { + SHMB_WORKER_THREAD = 0, + OBJECT_WORKER_THREAD, + MAX_THREAD_NUMBER +}; + static DEFINE_HASHTABLE(g_cb_servers, 8); static LIST_HEAD(g_mem_objs); static uint16_t g_last_cb_server_id = CBOBJ_SERVER_ID_START; @@ -268,6 +279,55 @@ struct smcinvoke_mem_obj { uint64_t shmbridge_handle; }; +static LIST_HEAD(g_bridge_postprocess); +DEFINE_MUTEX(bridge_postprocess_lock); + +static LIST_HEAD(g_object_postprocess); +DEFINE_MUTEX(object_postprocess_lock); + +struct bridge_deregister { + uint64_t shmbridge_handle; + struct dma_buf *dmabuf_to_free; +}; + +struct object_release { + uint32_t tzhandle; + uint32_t context_type; +}; + + +struct smcinvoke_shmbridge_deregister_pending_list { + struct list_head list; + struct bridge_deregister data; +}; + +struct smcinvoke_object_release_pending_list { + struct list_head list; + struct object_release data; +}; + +struct smcinvoke_worker_thread { + enum worker_thread_type type; + atomic_t postprocess_kthread_state; + wait_queue_head_t postprocess_kthread_wq; + struct task_struct *postprocess_kthread_task; +}; + +struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER]; +const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = { + "smcinvoke_shmbridge_postprocess", "smcinvoke_object_postprocess"}; + +static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr, + size_t in_buf_len, + uint8_t *out_buf, phys_addr_t out_paddr, + size_t out_buf_len, + struct smcinvoke_cmd_req *req, + union smcinvoke_arg *args_buf, + bool *tz_acked, uint32_t context_type, + struct qtee_shm *in_shm, struct qtee_shm *out_shm); + +static void process_piggyback_data(void *buf, size_t buf_size); + static void destroy_cb_server(struct kref *kref) { struct smcinvoke_server_info *server = container_of(kref, @@ -370,27 +430,236 @@ static 
uint32_t next_mem_map_obj_id_locked(void) return g_last_mem_map_obj_id; } +static void smcinvoke_shmbridge_post_process(void) +{ + struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL; + struct list_head *pos; + int ret = 0; + uint64_t handle = 0; + struct dma_buf *dmabuf_to_free = NULL; + + do { + mutex_lock(&bridge_postprocess_lock); + if (list_empty(&g_bridge_postprocess)) { + mutex_unlock(&bridge_postprocess_lock); + break; + } + pos = g_bridge_postprocess.next; + entry = list_entry(pos, + struct smcinvoke_shmbridge_deregister_pending_list, + list); + if (entry) { + handle = entry->data.shmbridge_handle; + dmabuf_to_free = entry->data.dmabuf_to_free; + } else { + pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos); + } + list_del(pos); + kfree_sensitive(entry); + mutex_unlock(&bridge_postprocess_lock); + + if (entry) { + do { + ret = qtee_shmbridge_deregister(handle); + if (unlikely(ret)) { + pr_err("SHM failed: ret:%d ptr:0x%x h:%#llx\n", + ret, + dmabuf_to_free, + handle); + } else { + pr_debug("SHM deletion: Handle:%#llx\n", + handle); + dma_buf_put(dmabuf_to_free); + } + } while (-EBUSY == ret); + } + } while (1); +} + +static int smcinvoke_object_post_process(void) +{ + struct smcinvoke_object_release_pending_list *entry = NULL; + struct list_head *pos; + int ret = 0; + bool release_handles; + uint32_t context_type; + uint8_t *in_buf = NULL; + uint8_t *out_buf = NULL; + struct smcinvoke_cmd_req req = {0}; + struct smcinvoke_msg_hdr hdr = {0}; + struct qtee_shm in_shm = {0}, out_shm = {0}; + + ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for in msg in object release\n"); + goto out; + } + + ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm); + if (ret) { + ret = -ENOMEM; + pr_err("shmbridge alloc failed for out msg in object release\n"); + goto out; + } + + do { + mutex_lock(&object_postprocess_lock); + if (list_empty(&g_object_postprocess)) { + mutex_unlock(&object_postprocess_lock); + break; + } + pos = g_object_postprocess.next; + entry = list_entry(pos, struct smcinvoke_object_release_pending_list, list); + if (entry) { + in_buf = in_shm.vaddr; + out_buf = out_shm.vaddr; + hdr.tzhandle = entry->data.tzhandle; + hdr.op = OBJECT_OP_RELEASE; + hdr.counts = 0; + *(struct smcinvoke_msg_hdr *)in_buf = hdr; + context_type = entry->data.context_type; + } else { + pr_err("entry is NULL, pos:%#llx\n", (uint64_t)pos); + } + list_del(pos); + kfree_sensitive(entry); + mutex_unlock(&object_postprocess_lock); + + if (entry) { + do { + ret = prepare_send_scm_msg(in_buf, in_shm.paddr, + SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr, + SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, + &release_handles, context_type, &in_shm, &out_shm); + process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE); + if (ret) { + pr_err("Failed to release object(0x%x), ret:%d\n", + hdr.tzhandle, ret); + } else { + pr_debug("Released object(0x%x) successfully.\n", + hdr.tzhandle); + } + } while (-EBUSY == ret); + } + } while (1); + +out: + qtee_shmbridge_free_shm(&in_shm); + qtee_shmbridge_free_shm(&out_shm); + + return ret; +} + +static void __wakeup_postprocess_kthread(struct smcinvoke_worker_thread *smcinvoke) +{ + if (smcinvoke) { + atomic_set(&smcinvoke->postprocess_kthread_state, + POST_KT_WAKEUP); + wake_up_interruptible(&smcinvoke->postprocess_kthread_wq); + } else { + pr_err("Invalid smcinvoke pointer.\n"); + } +} + + +static int smcinvoke_postprocess_kthread_func(void *data) +{ + struct 
smcinvoke_worker_thread *smcinvoke_wrk_trd = data; + const char *tag; + + if (!smcinvoke_wrk_trd) { + pr_err("Bad input.\n"); + return -EINVAL; + } + + tag = smcinvoke_wrk_trd->type == SHMB_WORKER_THREAD ? "shmbridge":"object"; + + while (!kthread_should_stop()) { + wait_event_interruptible( + smcinvoke_wrk_trd->postprocess_kthread_wq, + kthread_should_stop() || + (atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state) + == POST_KT_WAKEUP)); + pr_debug("kthread to %s postprocess is called %d\n", + tag, + atomic_read(&smcinvoke_wrk_trd->postprocess_kthread_state)); + switch (smcinvoke_wrk_trd->type) { + case SHMB_WORKER_THREAD: + smcinvoke_shmbridge_post_process(); + break; + case OBJECT_WORKER_THREAD: + smcinvoke_object_post_process(); + break; + default: + pr_err("Invalid thread type(%d), do nothing.\n", + (int)smcinvoke_wrk_trd->type); + break; + } + atomic_set(&smcinvoke_wrk_trd->postprocess_kthread_state, + POST_KT_SLEEP); + } + pr_warn("kthread to %s postprocess stopped\n", tag); + + return 0; +} + + +static int smcinvoke_create_kthreads(void) +{ + int i, rc = 0; + const enum worker_thread_type thread_type[MAX_THREAD_NUMBER] = { + SHMB_WORKER_THREAD, OBJECT_WORKER_THREAD}; + + for (i = 0; i < MAX_THREAD_NUMBER; i++) { + init_waitqueue_head(&smcinvoke[i].postprocess_kthread_wq); + smcinvoke[i].type = thread_type[i]; + smcinvoke[i].postprocess_kthread_task = kthread_run( + smcinvoke_postprocess_kthread_func, + &smcinvoke[i], thread_name[i]); + if (IS_ERR(smcinvoke[i].postprocess_kthread_task)) { + rc = PTR_ERR(smcinvoke[i].postprocess_kthread_task); + pr_err("fail to create kthread to postprocess, rc = %x\n", + rc); + return rc; + } + atomic_set(&smcinvoke[i].postprocess_kthread_state, + POST_KT_SLEEP); + } + + return rc; +} + +static void smcinvoke_destroy_kthreads(void) +{ + int i; + + for (i = 0; i < MAX_THREAD_NUMBER; i++) + kthread_stop(smcinvoke[i].postprocess_kthread_task); +} + static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) { - int ret = 0; - bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge; - struct dma_buf *dmabuf_to_free = mem_obj->dma_buf; - uint64_t shmbridge_handle = mem_obj->shmbridge_handle; + struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL; + if (!mem_obj->is_smcinvoke_created_shmbridge) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + entry->data.shmbridge_handle = mem_obj->shmbridge_handle; + entry->data.dmabuf_to_free = mem_obj->dma_buf; + mutex_lock(&bridge_postprocess_lock); + list_add_tail(&entry->list, &g_bridge_postprocess); + mutex_unlock(&bridge_postprocess_lock); + pr_debug("SHMBridge list: added a Handle:%#llx\n", + mem_obj->shmbridge_handle); + __wakeup_postprocess_kthread(&smcinvoke[SHMB_WORKER_THREAD]); + } else { + dma_buf_put(mem_obj->dma_buf); + } list_del(&mem_obj->list); kfree(mem_obj); mem_obj = NULL; - mutex_unlock(&g_smcinvoke_lock); - - if (is_bridge_created) - ret = qtee_shmbridge_deregister(shmbridge_handle); - if (ret) - pr_err("Error:%d delete bridge failed leaking memory 0x%x\n", - ret, dmabuf_to_free); - else - dma_buf_put(dmabuf_to_free); - - mutex_lock(&g_smcinvoke_lock); } static void del_mem_regn_obj_locked(struct kref *kref) @@ -2299,14 +2568,9 @@ static int release_cb_server(uint16_t server_id) int smcinvoke_release_filp(struct file *filp) { int ret = 0; - bool release_handles; - uint8_t *in_buf = NULL; - uint8_t *out_buf = NULL; - struct smcinvoke_msg_hdr hdr = {0}; struct smcinvoke_file_data *file_data = filp->private_data; - struct 
smcinvoke_cmd_req req = {0}; uint32_t tzhandle = 0; - struct qtee_shm in_shm = {0}, out_shm = {0}; + struct smcinvoke_object_release_pending_list *entry = NULL; trace_smcinvoke_release_filp(current->files, filp, file_count(filp), file_data->context_type); @@ -2321,38 +2585,23 @@ int smcinvoke_release_filp(struct file *filp) if (!tzhandle || tzhandle == SMCINVOKE_TZ_ROOT_OBJ) goto out; - ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &in_shm); - if (ret) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { ret = -ENOMEM; - pr_err("shmbridge alloc failed for in msg in release\n"); goto out; } - ret = qtee_shmbridge_allocate_shm(SMCINVOKE_TZ_MIN_BUF_SIZE, &out_shm); - if (ret) { - ret = -ENOMEM; - pr_err("shmbridge alloc failed for out msg in release\n"); - goto out; - } + entry->data.tzhandle = tzhandle; + entry->data.context_type = file_data->context_type; + mutex_lock(&object_postprocess_lock); + list_add_tail(&entry->list, &g_object_postprocess); + mutex_unlock(&object_postprocess_lock); + pr_debug("Object release list: added a handle:0x%lx\n", tzhandle); + __wakeup_postprocess_kthread(&smcinvoke[OBJECT_WORKER_THREAD]); - in_buf = in_shm.vaddr; - out_buf = out_shm.vaddr; - hdr.tzhandle = tzhandle; - hdr.op = OBJECT_OP_RELEASE; - hdr.counts = 0; - *(struct smcinvoke_msg_hdr *)in_buf = hdr; - - ret = prepare_send_scm_msg(in_buf, in_shm.paddr, - SMCINVOKE_TZ_MIN_BUF_SIZE, out_buf, out_shm.paddr, - SMCINVOKE_TZ_MIN_BUF_SIZE, &req, NULL, &release_handles, - file_data->context_type, &in_shm, &out_shm); - - process_piggyback_data(out_buf, SMCINVOKE_TZ_MIN_BUF_SIZE); out: kfree(filp->private_data); filp->private_data = NULL; - qtee_shmbridge_free_shm(&in_shm); - qtee_shmbridge_free_shm(&out_shm); return ret; @@ -2428,14 +2677,22 @@ static int smcinvoke_probe(struct platform_device *pdev) rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) { pr_err("dma_set_mask_and_coherent failed %d\n", rc); - goto exit_destroy_device; + goto exit_cv_del; } legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node, "qcom,support-legacy_smc"); invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD; + rc = smcinvoke_create_kthreads(); + if (rc) { + pr_err("smcinvoke_create_kthreads failed %d\n", rc); + goto exit_cv_del; + } + return 0; +exit_cv_del: + cdev_del(&smcinvoke_cdev); exit_destroy_device: device_destroy(driver_class, smcinvoke_device_no); exit_destroy_class: @@ -2449,6 +2706,7 @@ static int smcinvoke_remove(struct platform_device *pdev) { int count = 1; + smcinvoke_destroy_kthreads(); cdev_del(&smcinvoke_cdev); device_destroy(driver_class, smcinvoke_device_no); class_destroy(driver_class); From 236cdfb109e80d399b02e5d587b3e4671f865370 Mon Sep 17 00:00:00 2001 From: Indranil Pradhan Date: Wed, 29 Jun 2022 17:03:04 +0530 Subject: [PATCH 040/202] securemsm-kernel:Changing the kernel error messages to Info message for scm_call Changing the kernel error messages to Info message for unsupported sys-call in TZ. Test: Compiled tested and validated on device. 
Change-Id: Iced52a7c0ac40717de11d6b07923d414b380c40f --- tz_log/tz_log.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tz_log/tz_log.c b/tz_log/tz_log.c index 9124bff572..411104d2eb 100644 --- a/tz_log/tz_log.c +++ b/tz_log/tz_log.c @@ -1641,7 +1641,10 @@ static void tzdbg_query_encrypted_log(void) ret = qcom_scm_query_encrypted_log_feature(&enabled); if (ret) { - pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret); + if (ret == -EIO) + pr_info("SCM_CALL : SYS CALL NOT SUPPORTED IN TZ\n"); + else + pr_err("scm_call QUERY_ENCR_LOG_FEATURE failed ret %d\n", ret); tzdbg.is_encrypted_log_enabled = false; } else { pr_warn("encrypted qseelog enabled is %d\n", enabled); From 63658ee1fd23be1e9382412239cecab2ce3c4e6f Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Fri, 1 Jul 2022 14:35:02 -0700 Subject: [PATCH 041/202] qcedev: reduce wait timeout for crypto operations Current wait times for crypto is too high which might cause concurerency issues for other EEs. Reduce this to what is required. Also, reset the offload error for every new IOCTL call. Change-Id: I43dd2e59e3f30706c87cdc4f633d8132dc8410c9 Signed-off-by: Gaurav Kashyap --- crypto-qti/qcedev.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index c155d92883..db0cbf3a93 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -37,8 +37,12 @@ #define CACHE_LINE_SIZE 64 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE #define MAX_CEHW_REQ_TRANSFER_SIZE (128*32*1024) -/* Max wait time once a crypt o request is done */ -#define MAX_CRYPTO_WAIT_TIME 1500 +/* + * Max wait time once a crypto request is done. + * Assuming 5ms per crypto operation, this is calculated for + * the scenario of having 3 offload reqs + 1 tz req + buffer. + */ +#define MAX_CRYPTO_WAIT_TIME 25 static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, @@ -2156,7 +2160,7 @@ long qcedev_ioctl(struct file *file, err = -EINVAL; goto exit_free_qcedev_areq; } - + qcedev_areq->offload_cipher_op_req.err = QCEDEV_OFFLOAD_NO_ERROR; err = qcedev_smmu_ablk_offload_cipher(qcedev_areq, handle); if (err) goto exit_free_qcedev_areq; From 1bdcb08f8322b11a2640dd24a680aa53bd05b164 Mon Sep 17 00:00:00 2001 From: wenji Date: Tue, 12 Jul 2022 11:13:47 +0800 Subject: [PATCH 042/202] securemsm-kernel: Postpone the shambridge created by smcinvoke 1. When there is a large shambridge memory which is not deleted in time, The shambridge in QTEE might be out of memory. we met this issue in the TVM TUI unit test. TrustedUISampleTest -vm --gtest_filter=*Basic_TUIStartStop* 2. To avoid being out of shambridge memory, we should postpone the deletion of shambridge created by smcinvoke itself. 
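
The EBUSY handling added below amounts to "try the teardown inline, defer to the worker only when QTEE is busy". A trimmed sketch of that flow, reusing the list, lock and kthread names already introduced in patch 039 (the wrapper function itself is illustrative):

    /*
     * Sketch of the defer-on-EBUSY pattern: deregister the bridge
     * inline; if QTEE reports -EBUSY, queue the handle so the shmbridge
     * postprocess kthread retries the deregistration later.
     */
    static void release_shmbridge_sketch(uint64_t handle, struct dma_buf *dmabuf)
    {
            struct smcinvoke_shmbridge_deregister_pending_list *entry;
            int ret = qtee_shmbridge_deregister(handle);

            if (!ret) {
                    dma_buf_put(dmabuf);            /* released right away */
                    return;
            }
            if (ret != -EBUSY) {
                    pr_err("bridge delete failed, ret %d\n", ret);
                    return;                         /* genuine failure */
            }

            entry = kzalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return;
            entry->data.shmbridge_handle = handle;
            entry->data.dmabuf_to_free = dmabuf;

            mutex_lock(&bridge_postprocess_lock);
            list_add_tail(&entry->list, &g_bridge_postprocess);
            mutex_unlock(&bridge_postprocess_lock);

            /* worker keeps retrying qtee_shmbridge_deregister() until it succeeds */
            __wakeup_postprocess_kthread(&smcinvoke[SHMB_WORKER_THREAD]);
    }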
Change-Id: I2ef837339881c1dcd78a0da10fc848488c9819ca Signed-off-by: wenji --- smcinvoke/smcinvoke.c | 47 +++++++++++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 96c8e98589..91b4af32be 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -640,26 +640,43 @@ static void smcinvoke_destroy_kthreads(void) static inline void free_mem_obj_locked(struct smcinvoke_mem_obj *mem_obj) { + int ret = 0; + bool is_bridge_created = mem_obj->is_smcinvoke_created_shmbridge; + struct dma_buf *dmabuf_to_free = mem_obj->dma_buf; + uint64_t shmbridge_handle = mem_obj->shmbridge_handle; struct smcinvoke_shmbridge_deregister_pending_list *entry = NULL; - if (!mem_obj->is_smcinvoke_created_shmbridge) { - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return; - entry->data.shmbridge_handle = mem_obj->shmbridge_handle; - entry->data.dmabuf_to_free = mem_obj->dma_buf; - mutex_lock(&bridge_postprocess_lock); - list_add_tail(&entry->list, &g_bridge_postprocess); - mutex_unlock(&bridge_postprocess_lock); - pr_debug("SHMBridge list: added a Handle:%#llx\n", - mem_obj->shmbridge_handle); - __wakeup_postprocess_kthread(&smcinvoke[SHMB_WORKER_THREAD]); - } else { - dma_buf_put(mem_obj->dma_buf); - } list_del(&mem_obj->list); kfree(mem_obj); mem_obj = NULL; + mutex_unlock(&g_smcinvoke_lock); + + if (is_bridge_created) + ret = qtee_shmbridge_deregister(shmbridge_handle); + if (ret) { + pr_err("Error:%d delete bridge failed leaking memory 0x%x\n", + ret, dmabuf_to_free); + if (ret == -EBUSY) { + pr_err("EBUSY: we postpone it 0x%x\n", + dmabuf_to_free); + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry) { + entry->data.shmbridge_handle = shmbridge_handle; + entry->data.dmabuf_to_free = dmabuf_to_free; + mutex_lock(&bridge_postprocess_lock); + list_add_tail(&entry->list, &g_bridge_postprocess); + mutex_unlock(&bridge_postprocess_lock); + pr_debug("SHMBridge list: added a Handle:%#llx\n", + shmbridge_handle); + __wakeup_postprocess_kthread( + &smcinvoke[SHMB_WORKER_THREAD]); + } + } + } else { + dma_buf_put(dmabuf_to_free); + } + + mutex_lock(&g_smcinvoke_lock); } static void del_mem_regn_obj_locked(struct kref *kref) From 79b3d0e3772e76d0d4dc270932b9ad0f7026a71d Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Fri, 15 Jul 2022 09:18:24 -0700 Subject: [PATCH 043/202] qcedev: vote for clocks based on dts values Use DTSI values to vote for clocks instead of using fixed values. This enables to vote for crypto clocks on a per target basis. 
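
The probe-time change reduces to reading two optional device-tree properties and falling back to the built-in defaults before voting. A compact sketch, with the property names and defaults as introduced here and the probe context abbreviated (helper name is illustrative):

    /*
     * Sketch: read per-target interconnect bandwidth values from DT,
     * fall back to the driver defaults, then vote on the icc path.
     * Fields icc_path/icc_avg_bw/icc_peak_bw are the ones added to
     * struct qcedev_control in this patch.
     */
    static int qcedev_set_bw_from_dt_sketch(struct platform_device *pdev,
                                            struct qcedev_control *podev)
    {
            struct device_node *np = pdev->dev.of_node;

            if (of_property_read_u32(np, "qcom,icc_avg_bw", &podev->icc_avg_bw))
                    podev->icc_avg_bw = CRYPTO_AVG_BW;    /* default vote */
            if (of_property_read_u32(np, "qcom,icc_peak_bw", &podev->icc_peak_bw))
                    podev->icc_peak_bw = CRYPTO_PEAK_BW;

            return icc_set_bw(podev->icc_path, podev->icc_avg_bw,
                              podev->icc_peak_bw);
    }

A target that needs a different vote then only has to carry qcom,icc_avg_bw / qcom,icc_peak_bw in its DTSI node; boards without the properties keep the previous default behaviour.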
Change-Id: I05c9e55f4aa0ec876903f1963f859ecf1fc929ab Signed-off-by: Gaurav Kashyap --- crypto-qti/qce.h | 6 +++--- crypto-qti/qcedev.c | 27 +++++++++++++++++++++++---- crypto-qti/qcedevi.h | 4 ++++ 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h index a0e1e36912..3c64e30b60 100644 --- a/crypto-qti/qce.h +++ b/crypto-qti/qce.h @@ -55,9 +55,9 @@ #define QCE_CLK_DISABLE_FIRST 3 #define QCE_BW_REQUEST_RESET_FIRST 4 -/* interconnect average and peak bw for crypto device */ -#define CRYPTO_AVG_BW 393600 -#define CRYPTO_PEAK_BW 393600 +/* default average and peak bw for crypto device */ +#define CRYPTO_AVG_BW 100100 +#define CRYPTO_PEAK_BW 100100 typedef void (*qce_comp_func_ptr_t)(void *areq, unsigned char *icv, unsigned char *iv, int ret); diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index db0cbf3a93..8b4b737ae2 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -113,7 +113,7 @@ static int qcedev_control_clocks(struct qcedev_control *podev, bool enable) return ret; } ret = icc_set_bw(podev->icc_path, - CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + podev->icc_avg_bw, podev->icc_peak_bw); if (ret) { pr_err("%s Unable to set high bw\n", __func__); ret = qce_disable_clk(podev->qce); @@ -124,7 +124,7 @@ static int qcedev_control_clocks(struct qcedev_control *podev, bool enable) break; case QCE_BW_REQUEST_FIRST: ret = icc_set_bw(podev->icc_path, - CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + podev->icc_avg_bw, podev->icc_peak_bw); if (ret) { pr_err("%s Unable to set high bw\n", __func__); return ret; @@ -163,7 +163,7 @@ static int qcedev_control_clocks(struct qcedev_control *podev, bool enable) if (ret) { pr_err("%s Unable to disable clk\n", __func__); ret = icc_set_bw(podev->icc_path, - CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + podev->icc_avg_bw, podev->icc_peak_bw); if (ret) pr_err("%s Unable to set high bw\n", __func__); return ret; @@ -2495,7 +2495,26 @@ static int qcedev_probe_device(struct platform_device *pdev) goto exit_del_cdev; } - rc = icc_set_bw(podev->icc_path, CRYPTO_AVG_BW, CRYPTO_PEAK_BW); + /* + * HLOS crypto vote values from DTSI. If no values specified, use + * nominal values. + */ + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,icc_avg_bw", + &podev->icc_avg_bw)) { + pr_warn("%s: No icc avg BW set, using default\n", __func__); + podev->icc_avg_bw = CRYPTO_AVG_BW; + } + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,icc_peak_bw", + &podev->icc_peak_bw)) { + pr_warn("%s: No icc peak BW set, using default\n", __func__); + podev->icc_peak_bw = CRYPTO_PEAK_BW; + } + + rc = icc_set_bw(podev->icc_path, podev->icc_avg_bw, + podev->icc_peak_bw); if (rc) { pr_err("%s Unable to set high bandwidth\n", __func__); goto exit_unregister_bus_scale; diff --git a/crypto-qti/qcedevi.h b/crypto-qti/qcedevi.h index ca0208a736..6a7ec0b57c 100644 --- a/crypto-qti/qcedevi.h +++ b/crypto-qti/qcedevi.h @@ -89,6 +89,10 @@ struct qcedev_control { /* replaced msm_bus with interconnect path */ struct icc_path *icc_path; + /* average and peak bw values for interconnect */ + uint32_t icc_avg_bw; + uint32_t icc_peak_bw; + /* char device */ struct cdev cdev; From 0841f5d4c3886bca9afa984099b5f6ab09da8313 Mon Sep 17 00:00:00 2001 From: wenji Date: Sat, 23 Jul 2022 19:44:00 +0800 Subject: [PATCH 044/202] securemsm-kernel: Create device node of smcinvoke later Because the device node of smcinvoke is published at the beginning of the smcinvoke_probe(), the device node will be ready while the smcinvoke is not initialized completely. 
Hence the smcinvoke_release probably is called when the smcinvoke driver is not ready. this case will lead to smcinvoke crash issue. To avoid this concurrency issue, we create the device node at the end of the smcinvoke_probe. Change-Id: I930685a24fb744893017c90c1881f13e2f2c3d7c Signed-off-by: wenji --- smcinvoke/smcinvoke.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 91b4af32be..9cf91502be 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -313,8 +313,8 @@ struct smcinvoke_worker_thread { struct task_struct *postprocess_kthread_task; }; -struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER]; -const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = { +static struct smcinvoke_worker_thread smcinvoke[MAX_THREAD_NUMBER]; +static const char thread_name[MAX_THREAD_NUMBER][MAX_CHAR_NAME] = { "smcinvoke_shmbridge_postprocess", "smcinvoke_object_postprocess"}; static int prepare_send_scm_msg(const uint8_t *in_buf, phys_addr_t in_paddr, @@ -2661,11 +2661,26 @@ static int smcinvoke_probe(struct platform_device *pdev) unsigned int count = 1; int rc = 0; + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + pr_err("dma_set_mask_and_coherent failed %d\n", rc); + return rc; + } + legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-legacy_smc"); + invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD; + + rc = smcinvoke_create_kthreads(); + if (rc) { + pr_err("smcinvoke_create_kthreads failed %d\n", rc); + return rc; + } + rc = alloc_chrdev_region(&smcinvoke_device_no, baseminor, count, SMCINVOKE_DEV); if (rc < 0) { pr_err("chrdev_region failed %d for %s\n", rc, SMCINVOKE_DEV); - return rc; + goto exit_destroy_wkthread; } driver_class = class_create(THIS_MODULE, SMCINVOKE_DEV); if (IS_ERR(driver_class)) { @@ -2691,31 +2706,17 @@ static int smcinvoke_probe(struct platform_device *pdev) goto exit_destroy_device; } smcinvoke_pdev = pdev; - rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (rc) { - pr_err("dma_set_mask_and_coherent failed %d\n", rc); - goto exit_cv_del; - } - legacy_smc_call = of_property_read_bool((&pdev->dev)->of_node, - "qcom,support-legacy_smc"); - invoke_cmd = legacy_smc_call ? SMCINVOKE_INVOKE_CMD_LEGACY : SMCINVOKE_INVOKE_CMD; - - rc = smcinvoke_create_kthreads(); - if (rc) { - pr_err("smcinvoke_create_kthreads failed %d\n", rc); - goto exit_cv_del; - } return 0; -exit_cv_del: - cdev_del(&smcinvoke_cdev); exit_destroy_device: device_destroy(driver_class, smcinvoke_device_no); exit_destroy_class: class_destroy(driver_class); exit_unreg_chrdev_region: unregister_chrdev_region(smcinvoke_device_no, count); +exit_destroy_wkthread: + smcinvoke_destroy_kthreads(); return rc; } From 0d26cf953e58a93cb53d5609be6ea7e5c4747ecd Mon Sep 17 00:00:00 2001 From: Anvisha Date: Wed, 20 Jul 2022 16:24:26 +0530 Subject: [PATCH 045/202] qcedev: Added qce50 compilation for qcedev driver This change will compile and generate qce50 object only when QCEDEV(QTI Crypto Engine driver) is compiled. Test: Nominal, Stress, Adversial and Repetitive Test passed. 
Change-Id: I00f41c825d516dcf6e974a7e0333899a746f696f Signed-off-by: Anvisha --- Kbuild | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Kbuild b/Kbuild index eec7f9fb40..a0fe28ca3c 100644 --- a/Kbuild +++ b/Kbuild @@ -18,11 +18,8 @@ endif obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o tz_log_dlkm-objs := tz_log/tz_log.o -obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o -qce50_dlkm-objs := crypto-qti/qce50.o - obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o -qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o +qcedev-mod_dlkm-objs := crypto-qti/qce50.o crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o From 3c241f3b30e26ab0837e242acd606e16c2d4866f Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Sat, 23 Jul 2022 11:07:14 -0700 Subject: [PATCH 046/202] securemsm-kernel: Fix compilation errors with CONFIG_FTRACE=n smcinvoke.c:578:10: error: implicit declaration of function 'kthread_should_stop' while (!kthread_should_stop()) { smcinvoke.c:617:43: error: implicit declaration of function 'kthread_run' smcinvoke[i].postprocess_kthread_task = kthread_run( Change-Id: Id6a8b6844ec7ae00b55c81d3760a91da9d49a9ad Signed-off-by: Patrick Daly --- smcinvoke/smcinvoke.c | 1 + 1 file changed, 1 insertion(+) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 9cf91502be..d8af89b708 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "misc/qseecom_kernel.h" #include "smcinvoke.h" #include "smcinvoke_object.h" From 795df801dd64be2d91713a2b19bb32365a523b46 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 11 Jul 2022 16:32:09 -0700 Subject: [PATCH 047/202] qcedev: concurrency support in crypto driver Support to have multiple qcedev requests at a time in the driver. 
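The full diff follows below. Conceptually, it replaces the old "start the next queued request from the completion tasklet" flow with a queue-and-wake scheme: each waiting request sleeps on its own wait queue until the completion path promotes it to the active slot. A simplified, self-contained sketch of that scheme is shown here; every name is a placeholder, and the real driver additionally tracks per-request state, start failures and timeout recovery.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_req {
        struct list_head list;
        wait_queue_head_t wait_q;
        bool owns_engine;               /* set when promoted to active */
    };

    struct demo_dev {
        spinlock_t lock;
        struct list_head ready;         /* requests waiting their turn */
        struct demo_req *active;        /* request currently on the engine */
    };

    /* Submit path: take the engine if idle, otherwise queue and sleep. */
    static int demo_submit(struct demo_dev *dev, struct demo_req *req)
    {
        unsigned long flags;
        long ret;

        init_waitqueue_head(&req->wait_q);
        req->owns_engine = false;

        spin_lock_irqsave(&dev->lock, flags);
        if (!dev->active) {
            dev->active = req;
            req->owns_engine = true;
        } else {
            list_add_tail(&req->list, &dev->ready);
            /* Drops dev->lock while sleeping, re-acquires it on wakeup. */
            ret = wait_event_interruptible_lock_irq_timeout(req->wait_q,
                            req->owns_engine, dev->lock,
                            msecs_to_jiffies(5000));
            if (!req->owns_engine) {
                list_del(&req->list);
                spin_unlock_irqrestore(&dev->lock, flags);
                return ret ? (int)ret : -ETIMEDOUT;
            }
        }
        spin_unlock_irqrestore(&dev->lock, flags);
        return 0;                       /* caller now programs the hardware */
    }

    /* Completion path: hand the engine to the next queued request, if any. */
    static void demo_complete(struct demo_dev *dev)
    {
        struct demo_req *next;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        dev->active = NULL;
        if (!list_empty(&dev->ready)) {
            next = list_first_entry(&dev->ready, struct demo_req, list);
            list_del(&next->list);
            dev->active = next;
            next->owns_engine = true;
            wake_up_interruptible(&next->wait_q);
        }
        spin_unlock_irqrestore(&dev->lock, flags);
    }

The actual change keeps the same idea but folds it into submit_req()/req_done() using the driver's existing podev->lock and a per-request state field.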
Change-Id: I2ba8f22e2b659db04db348dfa3b06b70bf234d0b Signed-off-by: Gaurav Kashyap --- crypto-qti/qce50.c | 35 ++++++++++--- crypto-qti/qcedev.c | 119 +++++++++++++++++++++++++++++-------------- crypto-qti/qcedevi.h | 3 ++ 3 files changed, 111 insertions(+), 46 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 1d73ab48b3..2166779e6c 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -1129,10 +1129,10 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, if (creq->is_copy_op) { pce->data = 0; } else { - if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) - pce->data = (creq->cryptlen + creq->authsize); - else - pce->data = creq->cryptlen; + if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) + pce->data = (creq->cryptlen + creq->authsize); + else + pce->data = creq->cryptlen; } /* write encr seg start */ @@ -1143,7 +1143,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, pce = cmdlistinfo->seg_size; pce->data = totallen_in; - if (is_offload_op(creq->offload_op)) { + if (!is_des_cipher) { /* pattern info */ pce = cmdlistinfo->pattern_info; pce->data = creq->pattern_info; @@ -1155,9 +1155,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, /* IV counter size */ qce_set_iv_ctr_mask(pce_dev, creq); - } - if (!is_des_cipher) { pce = cmdlistinfo->encr_mask_3; pce->data = pce_dev->reg.encr_cntr_mask_3; pce = cmdlistinfo->encr_mask_2; @@ -2364,8 +2362,11 @@ int qce_manage_timeout(void *handle, int req_info) if (_qce_unlock_other_pipes(pce_dev, req_info)) pr_err("%s: fail unlock other pipes\n", __func__); + if (!atomic_read(&preq_info->in_use)) { + pr_err("request information %d already done\n", req_info); + return -ENXIO; + } qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, 0); return 0; } EXPORT_SYMBOL(qce_manage_timeout); @@ -2433,6 +2434,10 @@ static int _aead_complete(struct qce_device *pce_dev, int req_info) result_status = -ENXIO; } + if (!atomic_read(&preq_info->in_use)) { + pr_err("request information %d already done\n", req_info); + return -ENXIO; + } if (preq_info->mode == QCE_MODE_CCM) { /* * Not from result dump, instead, use the status we just @@ -2506,6 +2511,11 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info) pce_sps_data->consumer_status); result_status = -ENXIO; } + + if (!atomic_read(&preq_info->in_use)) { + pr_err("request information %d already done\n", req_info); + return -ENXIO; + } qce_free_req_info(pce_dev, req_info, true); qce_callback(areq, digest, (char *)bytecount32, result_status); return 0; @@ -2655,6 +2665,11 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) (char *)(pce_sps_data->result->encr_cntr_iv), sizeof(iv)); } + + if (!atomic_read(&preq_info->in_use)) { + pr_err("request information %d already done\n", req_info); + return -ENXIO; + } qce_free_req_info(pce_dev, req_info, true); qce_callback(areq, NULL, iv, result_status); } @@ -3468,6 +3483,10 @@ static void _sps_producer_callback(struct sps_event_notify *notify) } preq_info = &pce_dev->ce_request_info[req_info]; + if (!atomic_read(&preq_info->in_use)) { + pr_err("request information %d already done\n", req_info); + return; + } op = pce_dev->ce_request_info[req_info].offload_op; pce_sps_data = &preq_info->ce_sps; diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 8b4b737ae2..228f5b0469 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -44,6 +44,14 @@ */ #define 
MAX_CRYPTO_WAIT_TIME 25 +#define MAX_REQUEST_TIME 5000 + +enum qcedev_req_status { + QCEDEV_REQ_CURRENT = 0, + QCEDEV_REQ_WAITING = 1, + QCEDEV_REQ_SUBMITTED = 2, +}; + static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, @@ -311,42 +319,24 @@ static void req_done(unsigned long data) struct qcedev_async_req *areq; unsigned long flags = 0; struct qcedev_async_req *new_req = NULL; - int ret = 0; - int current_req_info = 0; spin_lock_irqsave(&podev->lock, flags); areq = podev->active_command; podev->active_command = NULL; -again: + if (areq && !areq->timed_out) + complete(&areq->complete); + + /* Look through queued requests and wake up the corresponding thread */ if (!list_empty(&podev->ready_commands)) { new_req = container_of(podev->ready_commands.next, struct qcedev_async_req, list); list_del(&new_req->list); - podev->active_command = new_req; - new_req->err = 0; - if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER) - ret = start_cipher_req(podev, ¤t_req_info); - else if (new_req->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) - ret = start_offload_cipher_req(podev, ¤t_req_info); - else - ret = start_sha_req(podev, ¤t_req_info); + new_req->state = QCEDEV_REQ_CURRENT; + wake_up_interruptible(&new_req->wait_q); } spin_unlock_irqrestore(&podev->lock, flags); - - if (areq) - complete(&areq->complete); - - if (new_req && ret) { - complete(&new_req->complete); - spin_lock_irqsave(&podev->lock, flags); - podev->active_command = NULL; - areq = NULL; - ret = 0; - new_req = NULL; - goto again; - } } void qcedev_sha_req_cb(void *cookie, unsigned char *digest, @@ -408,6 +398,7 @@ static int start_cipher_req(struct qcedev_control *podev, struct qce_req creq; int ret = 0; + memset(&creq, 0, sizeof(creq)); /* start the command on the podev->active_command */ qcedev_areq = podev->active_command; qcedev_areq->cipher_req.cookie = qcedev_areq->handle; @@ -461,6 +452,7 @@ static int start_cipher_req(struct qcedev_control *podev, creq.iv = &qcedev_areq->cipher_op_req.iv[0]; creq.ivsize = qcedev_areq->cipher_op_req.ivlen; + creq.iv_ctr_size = 0; creq.enckey = &qcedev_areq->cipher_op_req.enckey[0]; creq.encklen = qcedev_areq->cipher_op_req.encklen; @@ -495,7 +487,7 @@ static int start_cipher_req(struct qcedev_control *podev, creq.qce_cb = qcedev_cipher_req_cb; creq.areq = (void *)&qcedev_areq->cipher_req; creq.flags = 0; - creq.offload_op = 0; + creq.offload_op = QCE_OFFLOAD_NONE; ret = qce_ablk_cipher_req(podev->qce, &creq); *current_req_info = creq.current_req_info; unsupported: @@ -750,29 +742,73 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, int current_req_info = 0; int wait = MAX_CRYPTO_WAIT_TIME; bool print_sts = false; + struct qcedev_async_req *new_req = NULL; qcedev_areq->err = 0; podev = handle->cntl; + init_waitqueue_head(&qcedev_areq->wait_q); + spin_lock_irqsave(&podev->lock, flags); - if (podev->active_command == NULL) { - podev->active_command = qcedev_areq; - if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) - ret = start_cipher_req(podev, ¤t_req_info); - else if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER) - ret = start_offload_cipher_req(podev, ¤t_req_info); - else - ret = start_sha_req(podev, ¤t_req_info); - } else { - list_add_tail(&qcedev_areq->list, &podev->ready_commands); - } + /* + * Service only one crypto request at a time. 
+ * Any other new requests are queued in ready_commands and woken up + * only when the active command has finished successfully or when the + * request times out or when the command failed when setting up. + */ + do { + if (podev->active_command == NULL) { + podev->active_command = qcedev_areq; + qcedev_areq->state = QCEDEV_REQ_SUBMITTED; + switch (qcedev_areq->op_type) { + case QCEDEV_CRYPTO_OPER_CIPHER: + ret = start_cipher_req(podev, + ¤t_req_info); + break; + case QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER: + ret = start_offload_cipher_req(podev, + ¤t_req_info); + break; + default: - if (ret != 0) + ret = start_sha_req(podev, + ¤t_req_info); + break; + } + } else { + list_add_tail(&qcedev_areq->list, + &podev->ready_commands); + qcedev_areq->state = QCEDEV_REQ_WAITING; + if (wait_event_interruptible_lock_irq_timeout( + qcedev_areq->wait_q, + (qcedev_areq->state == QCEDEV_REQ_CURRENT), + podev->lock, + msecs_to_jiffies(MAX_REQUEST_TIME)) == 0) { + pr_err("%s: request timed out\n", __func__); + return qcedev_areq->err; + } + } + } while (qcedev_areq->state != QCEDEV_REQ_SUBMITTED); + + if (ret != 0) { podev->active_command = NULL; + /* + * Look through queued requests and wake up the corresponding + * thread. + */ + if (!list_empty(&podev->ready_commands)) { + new_req = container_of(podev->ready_commands.next, + struct qcedev_async_req, list); + list_del(&new_req->list); + new_req->state = QCEDEV_REQ_CURRENT; + wake_up_interruptible(&new_req->wait_q); + } + } spin_unlock_irqrestore(&podev->lock, flags); + qcedev_areq->timed_out = false; if (ret == 0) wait = wait_for_completion_timeout(&qcedev_areq->complete, msecs_to_jiffies(MAX_CRYPTO_WAIT_TIME)); @@ -788,7 +824,14 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, current_req_info); print_sts = true; qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); - qce_manage_timeout(podev->qce, current_req_info); + qcedev_areq->timed_out = true; + ret = qce_manage_timeout(podev->qce, current_req_info); + if (ret) { + pr_err("%s: error during manage timeout", __func__); + qcedev_areq->err = -EIO; + return qcedev_areq->err; + } + tasklet_schedule(&podev->done_tasklet); if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) return 0; diff --git a/crypto-qti/qcedevi.h b/crypto-qti/qcedevi.h index 6a7ec0b57c..719ec80788 100644 --- a/crypto-qti/qcedevi.h +++ b/crypto-qti/qcedevi.h @@ -66,6 +66,9 @@ struct qcedev_async_req { }; struct qcedev_handle *handle; int err; + wait_queue_head_t wait_q; + uint16_t state; + bool timed_out; }; /********************************************************************** From a4a925db9271c41ab7a6a0b760a4de69b3ed9acc Mon Sep 17 00:00:00 2001 From: Nicholas Pelham Date: Thu, 14 Jul 2022 11:36:30 -0700 Subject: [PATCH 048/202] securemsm-kernel: Add IClientEnv_registerWithCredentials Use new ClientEnv register method for kernel clients Change-Id: I3ce32788c5c7658dcaf808c00d52c32df226fec6 --- smcinvoke/IClientEnv.h | 26 ++++++++++++++++++++++++++ smcinvoke/smcinvoke.c | 9 +++++++++ smcinvoke/smcinvoke_kernel.c | 18 ++++-------------- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/smcinvoke/IClientEnv.h b/smcinvoke/IClientEnv.h index 1ad17971f2..2c6d329ddf 100644 --- a/smcinvoke/IClientEnv.h +++ b/smcinvoke/IClientEnv.h @@ -1,12 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0-only * * Copyright (c) 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #define IClientEnv_OP_open 0 #define IClientEnv_OP_registerLegacy 1 #define IClientEnv_OP_register 2 #define IClientEnv_OP_registerWithWhitelist 3 +#define IClientEnv_OP_notifyDomainChange 4 +#define IClientEnv_OP_registerWithCredentials 5 static inline int32_t IClientEnv_release(struct Object self) @@ -89,3 +92,26 @@ IClientEnv_registerWithWhitelist(struct Object self, return result; } +static inline int32_t +IClientEnv_notifyDomainChange(struct Object self) +{ + return Object_invoke(self, IClientEnv_OP_notifyDomainChange, 0, 0); +} + +static inline int32_t +IClientEnv_registerWithCredentials(struct Object self, struct Object + credentials_val, struct Object *clientEnv_ptr) +{ + union ObjectArg a[2]={{{0,0}}}; + int32_t result; + + a[0].o = credentials_val; + + result = Object_invoke(self, IClientEnv_OP_registerWithCredentials, a, + ObjectCounts_pack(0, 0, 1, 1)); + + *clientEnv_ptr = a[1].o; + + return result; +} + diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index d8af89b708..98d79869bf 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -33,6 +33,7 @@ #include "misc/qseecom_kernel.h" #include "smcinvoke.h" #include "smcinvoke_object.h" +#include "IClientEnv.h" #define CREATE_TRACE_POINTS #include "trace_smcinvoke.h" @@ -2256,6 +2257,14 @@ static long process_invoke_req(struct file *filp, unsigned int cmd, return -EINVAL; } + if (context_type == SMCINVOKE_OBJ_TYPE_TZ_OBJ && + tzobj->tzhandle == SMCINVOKE_TZ_ROOT_OBJ && + (req.op == IClientEnv_OP_notifyDomainChange || + req.op == IClientEnv_OP_registerWithCredentials)) { + pr_err("invalid rootenv op\n"); + return -EINVAL; + } + nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) + OBJECT_COUNTS_NUM_objects(req.counts); diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c index 96fe0af47a..46a292613c 100644 --- a/smcinvoke/smcinvoke_kernel.c +++ b/smcinvoke/smcinvoke_kernel.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #if !IS_ENABLED(CONFIG_QSEECOM) #include @@ -277,23 +278,12 @@ static int get_root_obj(struct Object *rootObj) } /* - * Get a client environment using CBOR encoded credentials - * with UID of SYSTEM_UID (1000) + * Get a client environment using a NULL credentials Object */ int32_t get_client_env_object(struct Object *clientEnvObj) { int32_t ret = OBJECT_ERROR; struct Object rootObj = Object_NULL; - /* Hardcode self cred buffer in CBOR encoded format. 
- * CBOR encoded credentials is created using following parameters, - * #define ATTR_UID 1 - * #define ATTR_PKG_NAME 3 - * #define SYSTEM_UID 1000 - * static const uint8_t bufString[] = {"UefiSmcInvoke"}; - */ - uint8_t encodedBuf[] = {0xA2, 0x01, 0x19, 0x03, 0xE8, 0x03, 0x6E, 0x55, - 0x65, 0x66, 0x69, 0x53, 0x6D, 0x63, 0x49, 0x6E, - 0x76, 0x6F, 0x6B, 0x65, 0x0}; /* get rootObj */ ret = get_root_obj(&rootObj); @@ -303,8 +293,8 @@ int32_t get_client_env_object(struct Object *clientEnvObj) } /* get client env */ - ret = IClientEnv_registerLegacy(rootObj, encodedBuf, - sizeof(encodedBuf), clientEnvObj); + ret = IClientEnv_registerWithCredentials(rootObj, + Object_NULL, clientEnvObj); if (ret) pr_err("Failed to get ClientEnvObject, ret = %d\n", ret); Object_release(rootObj); From 2f31c15027af507d3a5b826284a2cc8be3f00dab Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Sun, 31 Jul 2022 09:03:33 -0700 Subject: [PATCH 049/202] qcedev: vote at lowsvs by default in hlos Since TZ already votes at nominal, voting at nominal from HLOS will cause aggregate to cross Turbo, so vote at lowSVS by default from HLOS. Change-Id: I662fecfa1c8dd29e71eb3c59e2c03d58710ca387 Signed-off-by: Gaurav Kashyap --- crypto-qti/qce.h | 4 ++-- crypto-qti/qce50.c | 11 +++++++++++ crypto-qti/qcryptohw_50.h | 1 + 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h index 3c64e30b60..ccdbb78f6d 100644 --- a/crypto-qti/qce.h +++ b/crypto-qti/qce.h @@ -56,8 +56,8 @@ #define QCE_BW_REQUEST_RESET_FIRST 4 /* default average and peak bw for crypto device */ -#define CRYPTO_AVG_BW 100100 -#define CRYPTO_PEAK_BW 100100 +#define CRYPTO_AVG_BW 384 +#define CRYPTO_PEAK_BW 384 typedef void (*qce_comp_func_ptr_t)(void *areq, unsigned char *icv, unsigned char *iv, int ret); diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 2166779e6c..454897d453 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -269,6 +269,13 @@ static int qce_crypto_config(struct qce_device *pce_dev, return 0; } +static void qce_enable_clock_gating(struct qce_device *pce_dev) +{ + writel_relaxed(0x1, pce_dev->iobase + CRYPTO_PWR_CTRL); + //Write memory barrier + wmb(); +} + /* * IV counter mask is be set based on the values sent through the offload ioctl * calls. 
Currently for offload operations, it is 64 bytes of mask for AES CTR, @@ -2359,6 +2366,8 @@ int qce_manage_timeout(void *handle, int req_info) if (qce_sps_pipe_reset(pce_dev, op)) pr_err("%s: pipe reset failed\n", __func__); + qce_enable_clock_gating(pce_dev); + if (_qce_unlock_other_pipes(pce_dev, req_info)) pr_err("%s: fail unlock other pipes\n", __func__); @@ -5330,6 +5339,7 @@ static int _qce_resume(void *handle) pr_err("Producer cb registration failed rc = %d\n", rc); } + qce_enable_clock_gating(pce_dev); return rc; } @@ -6591,6 +6601,7 @@ void *qce_open(struct platform_device *pdev, int *rc) pce_dev->dev_no = pcedev_no; pcedev_no++; pce_dev->owner = QCE_OWNER_NONE; + qce_enable_clock_gating(pce_dev); mutex_unlock(&qce_iomap_mutex); return pce_dev; err: diff --git a/crypto-qti/qcryptohw_50.h b/crypto-qti/qcryptohw_50.h index 253cfd1654..ec07422861 100644 --- a/crypto-qti/qcryptohw_50.h +++ b/crypto-qti/qcryptohw_50.h @@ -281,6 +281,7 @@ #define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC #define CRYPTO_CONFIG_REG 0x1A400 +#define CRYPTO_PWR_CTRL 0x1A408 #define CRYPTO_DEBUG_ENABLE_REG 0x1AF00 #define CRYPTO_DEBUG_REG 0x1AF04 From 200b12bb33eb9614f01f3ac3aab54e0f63562aba Mon Sep 17 00:00:00 2001 From: basant kumar Date: Mon, 8 Aug 2022 17:06:27 +0530 Subject: [PATCH 050/202] securemsm-kernel: smcinvoke: Add explicit cache flush during callback req from TZ During Callback request from TZ, smcinvoke in and out buffers need explicit cache operation with legacy smcinvoke. Change-Id: I3eacd69901c1ce117017b2d59a28dfab83b5f3f9 --- smcinvoke/smcinvoke.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 98d79869bf..9b46adf552 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -1350,8 +1350,14 @@ static int invoke_cmd_handler(int cmd, phys_addr_t in_paddr, size_t in_buf_len, break; case SMCINVOKE_CB_RSP_CMD: + if (legacy_smc_call) + qtee_shmbridge_inv_shm_buf(out_shm); ret = qcom_scm_invoke_callback_response(virt_to_phys(out_buf), out_buf_len, result, response_type, data); + if (legacy_smc_call) { + qtee_shmbridge_inv_shm_buf(in_shm); + qtee_shmbridge_inv_shm_buf(out_shm); + } break; default: From 94990a445d26ec78905263387bc0769ece451f46 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 15 Aug 2022 15:27:33 -0700 Subject: [PATCH 051/202] qcedev: check num_fds during unmap check the num_fds passed into unmap buf ioctl, or else it can lead to an out of bounds access. 
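The diff below applies the standard ioctl-argument rule: a user-supplied element count must be checked against the capacity of the fixed-size array it indexes before any loop runs over it. A generic sketch of that rule follows; the structure and names here are illustrative, not the qcedev UAPI definitions.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    #define DEMO_MAX_FDS 16

    struct demo_unmap_req {
        int fd[DEMO_MAX_FDS];
        u32 num_fds;            /* supplied by user space, must be validated */
    };

    static int demo_unmap_ioctl(void __user *arg)
    {
        struct demo_unmap_req req;
        u32 i;

        if (copy_from_user(&req, arg, sizeof(req)))
            return -EFAULT;

        /* Reject counts larger than the backing array before indexing it. */
        if (req.num_fds > ARRAY_SIZE(req.fd))
            return -EINVAL;

        for (i = 0; i < req.num_fds; i++) {
            /* ... unmap req.fd[i] ... */
        }
        return 0;
    }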
Change-Id: Ief209a60a6b7dc1ea4be485eaf5cf51d2955a980 Signed-off-by: Gaurav Kashyap --- crypto-qti/qcedev.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index 228f5b0469..b61e7fc410 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -2415,7 +2415,9 @@ long qcedev_ioctl(struct file *file, goto exit_free_qcedev_areq; } - if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) { + if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) { + pr_err("%s: err: num_fds = %d exceeds max value\n", + __func__, map_buf.num_fds); err = -EINVAL; goto exit_free_qcedev_areq; } @@ -2455,6 +2457,12 @@ long qcedev_ioctl(struct file *file, err = -EFAULT; goto exit_free_qcedev_areq; } + if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) { + pr_err("%s: err: num_fds = %d exceeds max value\n", + __func__, unmap_buf.num_fds); + err = -EINVAL; + goto exit_free_qcedev_areq; + } for (i = 0; i < unmap_buf.num_fds; i++) { err = qcedev_check_and_unmap_buffer(handle, From 20f8c5a846b2babac7cae5cd9a62dc91ffe98d76 Mon Sep 17 00:00:00 2001 From: wenji Date: Fri, 12 Aug 2022 14:47:31 +0800 Subject: [PATCH 052/202] securemsm-kernel: Remove interruptable signal 1. When invoke thread have resumed from freezing to running or is killed, the interruption signals will always interrupt the smcinvoke callback calls. this will lead the QTEE and listener to be out of sync. 2. To fix this issue, we remove the signal interruption, this solution will prevent the smcinvoke from being out of sync. Change-Id: I8f0cd7342784564ce12e617dc4076638f365aba9 Signed-off-by: wenji --- smcinvoke/smcinvoke.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 9b46adf552..c13af0a693 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -1464,7 +1464,7 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp) timeout_jiff = msecs_to_jiffies(1000); while (cbobj_retries < CBOBJ_MAX_RETRIES) { - ret = wait_event_interruptible_timeout(srvr_info->rsp_wait_q, + ret = wait_event_timeout(srvr_info->rsp_wait_q, (cb_txn->state == SMCINVOKE_REQ_PROCESSED) || (srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT), timeout_jiff); From 6aec313e2a0e2702308f0d7bec34a5d548d280f8 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Wed, 17 Aug 2022 15:39:03 -0700 Subject: [PATCH 053/202] Revert "qcedev: Added qce50 compilation for qcedev driver" This reverts commit 0d26cf953e58a93cb53d5609be6ea7e5c4747ecd. 
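On the wait-primitive change in patch 052 above: the whole fix is the difference in how the two macros react to pending signals. A minimal sketch with placeholder names (demo_wq and demo_done are not the driver's symbols):

    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_done;

    /*
     * wait_event_interruptible_timeout() returns -ERESTARTSYS as soon as the
     * caller has a signal pending (freezer wake-up, SIGKILL, ...), so the wait
     * for the callback response could end before the listener had answered.
     * wait_event_timeout() ignores signals: only the condition becoming true
     * or the timeout expiring ends the wait.
     */
    static long demo_wait_for_response(void)
    {
        return wait_event_timeout(demo_wq, demo_done, msecs_to_jiffies(1000));
    }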
Change-Id: I904eac0ef56f5851d63aee9bcb97f0585069a41e Signed-off-by: Gaurav Kashyap --- Kbuild | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Kbuild b/Kbuild index a0fe28ca3c..eec7f9fb40 100644 --- a/Kbuild +++ b/Kbuild @@ -18,8 +18,11 @@ endif obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o tz_log_dlkm-objs := tz_log/tz_log.o +obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o +qce50_dlkm-objs := crypto-qti/qce50.o + obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o -qcedev-mod_dlkm-objs := crypto-qti/qce50.o crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o +qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o From 91e3da89cb81c18c2739245f5bd74406f959e125 Mon Sep 17 00:00:00 2001 From: Jayasri Bhattacharyya Date: Wed, 17 Aug 2022 18:40:29 +0530 Subject: [PATCH 054/202] securemam-kernel: Change the scope of CONFIG_QSEECOM Export get_client_env_object when CONFIG_QSEECOM is enabled Change-Id: I1a52d6e86c8bc75436df55a91759b251caeaa051 --- smcinvoke/smcinvoke_kernel.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c index 46a292613c..96032544a6 100644 --- a/smcinvoke/smcinvoke_kernel.c +++ b/smcinvoke/smcinvoke_kernel.c @@ -3,7 +3,6 @@ * Copyright (c) 2021, The Linux Foundation. All rights reserved. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ -#if !IS_ENABLED(CONFIG_QSEECOM) #include #include #include @@ -14,12 +13,15 @@ #include #include #include "smcinvoke.h" -#include "linux/qseecom.h" #include "smcinvoke_object.h" +#include "IClientEnv.h" + +#if !IS_ENABLED(CONFIG_QSEECOM) +#include "linux/qseecom.h" #include "misc/qseecom_kernel.h" #include "IQSEEComCompat.h" #include "IQSEEComCompatAppLoader.h" -#include "IClientEnv.h" +#endif const uint32_t CQSEEComCompatAppLoader_UID = 122; @@ -302,6 +304,8 @@ int32_t get_client_env_object(struct Object *clientEnvObj) } EXPORT_SYMBOL(get_client_env_object); +#if !IS_ENABLED(CONFIG_QSEECOM) + static int load_app(struct qseecom_compat_context *cxt, const char *app_name) { size_t fw_size = 0; From ccc44aff77f9cdf56595ce4bbba894b770b49523 Mon Sep 17 00:00:00 2001 From: Jayasri Bhattacharyya Date: Thu, 14 Jul 2022 12:39:55 +0530 Subject: [PATCH 055/202] Securemsm-kernel: Enable qseecom for khaje 1. Add qseecom_dlkm.ko based on Khaje and AUTO arch type 2. Restructure conf file into multiple conf files to enable feature based conf file. 
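On the get_client_env_object() scope change in patch 054 above: with the symbol exported unconditionally, another kernel module can obtain a client environment object directly. A rough usage sketch based only on the interfaces visible in these patches; the surrounding function, the error handling and the exact include are assumptions (the prototype lives in the smcinvoke kernel headers).

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/types.h>
    #include "smcinvoke_object.h"

    static int demo_use_client_env(void)
    {
        struct Object client_env = Object_NULL;
        int32_t ret;

        ret = get_client_env_object(&client_env);
        if (ret) {
            pr_err("failed to get client env object, ret = %d\n", ret);
            return -EIO;
        }

        /* ... invoke operations against client_env here ... */

        Object_release(client_env);
        return 0;
    }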
Change-Id: I8bc0472667aebc35e8d1afa37eaca5c3353fd191 Signed-off-by: Jayasri Bhattacharyya --- Android.mk | 8 +++--- Kbuild | 30 +++++++++++++--------- config/sec-kernel_auto_defconfig.conf | 6 ----- config/sec-kernel_defconfig.conf | 1 - config/sec-kernel_defconfig.h | 1 - config/sec-kernel_defconfig_qseecom.conf | 1 + config/sec-kernel_defconfig_qseecom.h | 6 +++++ config/sec-kernel_defconfig_smcinvoke.conf | 1 + config/sec-kernel_defconfig_smcinvoke.h | 6 +++++ config/sec-kernel_defconfig_tvm.conf | 6 ----- linux/misc/qseecom_kernel.h | 2 +- securemsm_kernel_product_board.mk | 10 ++++---- securemsm_kernel_vendor_board.mk | 13 +++++----- 13 files changed, 48 insertions(+), 43 deletions(-) delete mode 100644 config/sec-kernel_auto_defconfig.conf create mode 100644 config/sec-kernel_defconfig_qseecom.conf create mode 100644 config/sec-kernel_defconfig_qseecom.h create mode 100644 config/sec-kernel_defconfig_smcinvoke.conf create mode 100644 config/sec-kernel_defconfig_smcinvoke.h delete mode 100644 config/sec-kernel_defconfig_tvm.conf diff --git a/Android.mk b/Android.mk index 50f7e5158d..08c958c393 100644 --- a/Android.mk +++ b/Android.mk @@ -37,8 +37,6 @@ LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################### ################################################### -ifneq ($(TARGET_BOARD_AUTO),true) -#$(error $(SSG_SRC_FILES)) include $(CLEAR_VARS) #LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := smcinvoke_dlkm.ko @@ -48,7 +46,6 @@ LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -endif #TARGET_BOARD_AUTO ################################################### ################################################### include $(CLEAR_VARS) @@ -111,7 +108,7 @@ LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk ################################################### ################################################### -ifeq ($(TARGET_BOARD_AUTO),true) +ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO))) include $(CLEAR_VARS) LOCAL_SRC_FILES := $(SSG_SRC_FILES) LOCAL_MODULE := qseecom_dlkm.ko @@ -120,6 +117,7 @@ LOCAL_MODULE_TAGS := optional LOCAL_MODULE_DEBUG_ENABLE := true LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT) include $(DLKM_DIR)/Build_external_kernelmodule.mk -endif #TARGET_BOARD_AUTO +endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO +################################################### ################################################### endif #COMPILE_SECUREMSM_DLKM check diff --git a/Kbuild b/Kbuild index eec7f9fb40..d23207a271 100644 --- a/Kbuild +++ b/Kbuild @@ -1,19 +1,25 @@ LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \ - -I$(SSG_MODULE_ROOT)/linux/ \ - -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h -ifeq ($(CONFIG_ARCH_SA8155),y) -include $(SSG_MODULE_ROOT)/config/sec-kernel_auto_defconfig.conf -obj-$(CONFIG_QSEECOM) += qseecom_dlkm.o -qseecom_dlkm-objs := qseecom/qseecom.o -else -ifeq ($(CONFIG_ARCH_QTI_VM), y) -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_tvm.conf -else -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf + -I$(SSG_MODULE_ROOT)/linux/ + +ifneq ($(CONFIG_ARCH_QTI_VM), y) + LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h + include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf endif + +#Enable Qseecom if CONFIG_ARCH_KHAJE OR CONFIG_ARCH_KHAJE is 
set to y +ifneq (, $(filter y, $(CONFIG_ARCH_KHAJE) $(CONFIG_ARCH_SA8155))) + include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.conf + LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.h + + obj-$(CONFIG_QSEECOM) += qseecom_dlkm.o + qseecom_dlkm-objs := qseecom/qseecom.o +endif + +include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.conf +LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.h + obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o -endif obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o tz_log_dlkm-objs := tz_log/tz_log.o diff --git a/config/sec-kernel_auto_defconfig.conf b/config/sec-kernel_auto_defconfig.conf deleted file mode 100644 index d0aefdd092..0000000000 --- a/config/sec-kernel_auto_defconfig.conf +++ /dev/null @@ -1,6 +0,0 @@ -export CONFIG_QTI_TZ_LOG=m -export CONFIG_CRYPTO_DEV_QCEDEV=m -export CONFIG_CRYPTO_DEV_QCRYPTO=m -export CONFIG_HDCP_QSEECOM=m -export CONFIG_HW_RANDOM_MSM_LEGACY=m -export CONFIG_QSEECOM=m diff --git a/config/sec-kernel_defconfig.conf b/config/sec-kernel_defconfig.conf index c75cb200d6..db6cd22434 100644 --- a/config/sec-kernel_defconfig.conf +++ b/config/sec-kernel_defconfig.conf @@ -1,4 +1,3 @@ -export CONFIG_QCOM_SMCINVOKE=m export CONFIG_QTI_TZ_LOG=m export CONFIG_CRYPTO_DEV_QCEDEV=m export CONFIG_CRYPTO_DEV_QCRYPTO=m diff --git a/config/sec-kernel_defconfig.h b/config/sec-kernel_defconfig.h index 4cf5f029dc..a1e2a603bb 100644 --- a/config/sec-kernel_defconfig.h +++ b/config/sec-kernel_defconfig.h @@ -3,7 +3,6 @@ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.. */ -#define CONFIG_QCOM_SMCINVOKE 1 #define CONFIG_QTI_TZ_LOG 1 #define CONFIG_CRYPTO_DEV_QCEDEV 1 #define CONFIG_CRYPTO_DEV_QCRYPTO 1 diff --git a/config/sec-kernel_defconfig_qseecom.conf b/config/sec-kernel_defconfig_qseecom.conf new file mode 100644 index 0000000000..ec586b6537 --- /dev/null +++ b/config/sec-kernel_defconfig_qseecom.conf @@ -0,0 +1 @@ +export CONFIG_QSEECOM=m diff --git a/config/sec-kernel_defconfig_qseecom.h b/config/sec-kernel_defconfig_qseecom.h new file mode 100644 index 0000000000..e4e3ca3f92 --- /dev/null +++ b/config/sec-kernel_defconfig_qseecom.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.. + */ + +#define CONFIG_QSEECOM 1 diff --git a/config/sec-kernel_defconfig_smcinvoke.conf b/config/sec-kernel_defconfig_smcinvoke.conf new file mode 100644 index 0000000000..5d20e21e85 --- /dev/null +++ b/config/sec-kernel_defconfig_smcinvoke.conf @@ -0,0 +1 @@ +export CONFIG_QCOM_SMCINVOKE=m diff --git a/config/sec-kernel_defconfig_smcinvoke.h b/config/sec-kernel_defconfig_smcinvoke.h new file mode 100644 index 0000000000..4c4b5c7338 --- /dev/null +++ b/config/sec-kernel_defconfig_smcinvoke.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.. 
+ */ + +#define CONFIG_QCOM_SMCINVOKE 1 diff --git a/config/sec-kernel_defconfig_tvm.conf b/config/sec-kernel_defconfig_tvm.conf deleted file mode 100644 index 9c1ad4c4f2..0000000000 --- a/config/sec-kernel_defconfig_tvm.conf +++ /dev/null @@ -1,6 +0,0 @@ -export CONFIG_QCOM_SMCINVOKE=m -export CONFIG_QTI_TZ_LOG=n -export CONFIG_CRYPTO_DEV_QCEDEV=n -export CONFIG_CRYPTO_DEV_QCRYPTO=n -export CONFIG_HDCP_QSEECOM=n -export CONFIG_HW_RANDOM_MSM_LEGACY=n diff --git a/linux/misc/qseecom_kernel.h b/linux/misc/qseecom_kernel.h index f67ca47c0c..2c0ffeca76 100644 --- a/linux/misc/qseecom_kernel.h +++ b/linux/misc/qseecom_kernel.h @@ -33,7 +33,7 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len); int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high); -#if IS_ENABLED(CONFIG_QSEECOM) || IS_ENABLED(CONFIG_ARCH_SA8155) +#if IS_ENABLED(CONFIG_QSEECOM) int qseecom_process_listener_from_smcinvoke(uint32_t *result, u64 *response_type, unsigned int *data); #else diff --git a/securemsm_kernel_product_board.mk b/securemsm_kernel_product_board.mk index a182fb7dcd..4f13b790de 100644 --- a/securemsm_kernel_product_board.mk +++ b/securemsm_kernel_product_board.mk @@ -9,18 +9,18 @@ ENABLE_SECUREMSM_DLKM := true endif ifeq ($(ENABLE_SECUREMSM_DLKM), true) -PRODUCT_PACKAGES += smcinvoke_dlkm.ko PRODUCT_PACKAGES += tz_log_dlkm.ko PRODUCT_PACKAGES += qcedev-mod_dlkm.ko PRODUCT_PACKAGES += qce50_dlkm.ko PRODUCT_PACKAGES += qcrypto-msm_dlkm.ko PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko PRODUCT_PACKAGES += qrng_dlkm.ko -ifeq ($(TARGET_BOARD_AUTO),true) -PRODUCT_PACKAGES += qseecom_dlkm.ko -else PRODUCT_PACKAGES += smcinvoke_dlkm.ko -endif #TARGET_BOARD_AUTO + +#Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true +ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO))) +PRODUCT_PACKAGES += qseecom_dlkm.ko +endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO endif #ENABLE_SECUREMSM_DLKM diff --git a/securemsm_kernel_vendor_board.mk b/securemsm_kernel_vendor_board.mk index bb2771a085..4fe698b178 100644 --- a/securemsm_kernel_vendor_board.mk +++ b/securemsm_kernel_vendor_board.mk @@ -20,13 +20,14 @@ BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \ $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko \ -ifeq ($(TARGET_BOARD_AUTO),true) -BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko -BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko -BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko -else BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko -endif + +#Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true +ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO))) +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko +endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO endif #ENABLE_SECUREMSM_DLKM From bbca7083844b63024cc3c2cc9c375885b7e79c76 Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 25 Aug 2022 22:15:42 -0700 
Subject: [PATCH 056/202] qcedev: hold spinlock while managing crypto errors. Currently, there is no lock held when doing crypto housekeeping when a timeout occurs. Use a lock in this scenario to avoid concurrent scenario timing errors. Change-Id: I60c243e0dfde5a716df772177ab4cd75d9b5b7cc Signed-off-by: Gaurav Kashyap --- crypto-qti/qcedev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index b61e7fc410..d53e512aeb 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -786,6 +786,7 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, podev->lock, msecs_to_jiffies(MAX_REQUEST_TIME)) == 0) { pr_err("%s: request timed out\n", __func__); + spin_unlock_irqrestore(&podev->lock, flags); return qcedev_areq->err; } } @@ -823,14 +824,17 @@ static int submit_req(struct qcedev_async_req *qcedev_areq, pr_err("%s: wait timed out, req info = %d\n", __func__, current_req_info); print_sts = true; + spin_lock_irqsave(&podev->lock, flags); qcedev_check_crypto_status(qcedev_areq, podev->qce, print_sts); qcedev_areq->timed_out = true; ret = qce_manage_timeout(podev->qce, current_req_info); if (ret) { pr_err("%s: error during manage timeout", __func__); qcedev_areq->err = -EIO; + spin_unlock_irqrestore(&podev->lock, flags); return qcedev_areq->err; } + spin_unlock_irqrestore(&podev->lock, flags); tasklet_schedule(&podev->done_tasklet); if (qcedev_areq->offload_cipher_op_req.err != QCEDEV_OFFLOAD_NO_ERROR) From 2461cc62a655808e2bc39f1fc684de9a20a5210a Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Thu, 25 Aug 2022 22:06:34 -0700 Subject: [PATCH 057/202] qcedev: disable clock gating till stability issues resolved Disable the clock gating feature till some of the stability issues concerning it is resolved. Change-Id: I7c7832a843240a75b426ca9187fd02dc1488d0ca Signed-off-by: Gaurav Kashyap --- crypto-qti/qce50.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index 454897d453..bf1a37cd08 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -271,9 +271,8 @@ static int qce_crypto_config(struct qce_device *pce_dev, static void qce_enable_clock_gating(struct qce_device *pce_dev) { - writel_relaxed(0x1, pce_dev->iobase + CRYPTO_PWR_CTRL); - //Write memory barrier - wmb(); + /* This feature might cause some HW issues, noop till resolved. */ + return; } /* From b7877a04967b711b7e6a40aa7d2a0a257ff85b16 Mon Sep 17 00:00:00 2001 From: Divisha Bisht Date: Tue, 6 Sep 2022 15:24:38 +0530 Subject: [PATCH 058/202] securemsm-kernel: Update error codes in IQSEEComCompatAppLoader.h Error codes in IQSEEComCompatAppLoader.h updated as per the actual IDL interface in TZ Change-Id: I407d9bccdeef1d1015f6533c78adab23267133ea --- smcinvoke/IQSEEComCompatAppLoader.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/smcinvoke/IQSEEComCompatAppLoader.h b/smcinvoke/IQSEEComCompatAppLoader.h index 9bc390049b..2b8b054307 100644 --- a/smcinvoke/IQSEEComCompatAppLoader.h +++ b/smcinvoke/IQSEEComCompatAppLoader.h @@ -1,10 +1,15 @@ /* SPDX-License-Identifier: GPL-2.0-only * * Copyright (c) 2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include "smcinvoke_object.h" +#define IQSEEComCompatAppLoader_MAX_FILENAME_LEN UINT32_C(64) +#define IQSEEComCompatAppLoader_ELFCLASS32 UINT32_C(1) +#define IQSEEComCompatAppLoader_ELFCLASS64 UINT32_C(2) + #define IQSEEComCompatAppLoader_ERROR_INVALID_BUFFER INT32_C(10) #define IQSEEComCompatAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11) #define IQSEEComCompatAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12) @@ -17,9 +22,11 @@ #define IQSEEComCompatAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19) #define IQSEEComCompatAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20) #define IQSEEComCompatAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21) -#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(22) -#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(23) -#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(24) +#define IQSEEComCompatAppLoader_ERROR_APP_BLACKLISTED INT32_C(22) +#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(23) +#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(24) +#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(25) +#define IQSEEComCompatAppLoader_ERROR_APP_ARCH_NOT_SUPPORTED INT32_C(26) #define IQSEEComCompatAppLoader_OP_loadFromRegion 0 #define IQSEEComCompatAppLoader_OP_loadFromBuffer 1 From efd4a5f0bdd2e47382a70346836839d0caa0d934 Mon Sep 17 00:00:00 2001 From: Smita Ghosh Date: Fri, 9 Sep 2022 13:25:47 -0700 Subject: [PATCH 059/202] smcinvoke: Increase MAX_RETRY Change-Id: Ibcc7bd8614246e44938aac338ac2c06414d3ce63 --- smcinvoke/smcinvoke.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index c13af0a693..46e3a5a19f 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -56,7 +56,8 @@ #define SMCINVOKE_MEM_RGN_OBJ 1 #define SMCINVOKE_MEM_PERM_RW 6 #define SMCINVOKE_SCM_EBUSY_WAIT_MS 30 -#define SMCINVOKE_SCM_EBUSY_MAX_RETRY 67 +#define SMCINVOKE_SCM_EBUSY_MAX_RETRY 200 + /* TZ defined values - Start */ From cc34676b70e4034359f2e05c45507e1488fdbb2e Mon Sep 17 00:00:00 2001 From: venkata sateesh Date: Mon, 29 Aug 2022 22:29:32 -0700 Subject: [PATCH 060/202] Securemsm-kernel: Enable qseecom for Auto GVM Platform Add qseecom support for Auto GVM platform. Change-Id: I9b386486961e36b571498bf6c495c8c786df64c3 Signed-off-by: venkata sateesh --- Kbuild | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Kbuild b/Kbuild index d23207a271..a278f9cccd 100644 --- a/Kbuild +++ b/Kbuild @@ -6,8 +6,8 @@ ifneq ($(CONFIG_ARCH_QTI_VM), y) include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf endif -#Enable Qseecom if CONFIG_ARCH_KHAJE OR CONFIG_ARCH_KHAJE is set to y -ifneq (, $(filter y, $(CONFIG_ARCH_KHAJE) $(CONFIG_ARCH_SA8155))) +#Enable Qseecom if CONFIG_ARCH_KHAJE OR CONFIG_ARCH_KHAJE or CONFIG_QTI_QUIN_GVM is set to y +ifneq (, $(filter y, $(CONFIG_QTI_QUIN_GVM) $(CONFIG_ARCH_KHAJE) $(CONFIG_ARCH_SA8155))) include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.conf LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.h From 0f28e48cbccdda3103382f680993da89a0ab7c3f Mon Sep 17 00:00:00 2001 From: Smita Ghosh Date: Wed, 5 Oct 2022 20:40:18 -0700 Subject: [PATCH 061/202] secure-kernel: Add support for 5.15 kernel Add following changes. Fix switch case latest compiler throwing error where CASE statement does not have break or return under all the execution paths. 
Rename size_add to size_add_ There is a name collision between this one and one coming from overflow.h. Hence renaming it. Modify IRQ handling for qce50 module Change PDE_DATA to pde_data Signed-off-by: Smita Ghosh Change-Id: I4226fb41df12273b45d2e114f1aad2709dc36eb7 Signed-off-by: Smita Ghosh --- crypto-qti/qce50.c | 6 ++---- crypto-qti/qcedev.c | 4 ++-- crypto-qti/qcrypto.c | 9 ++++++++- hdcp/hdcp_qseecom.c | 4 +++- smcinvoke/smcinvoke.c | 10 +++++----- tz_log/tz_log.c | 2 +- 6 files changed, 21 insertions(+), 14 deletions(-) diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c index bf1a37cd08..ed0dca216d 100644 --- a/crypto-qti/qce50.c +++ b/crypto-qti/qce50.c @@ -6278,10 +6278,8 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, goto err_getting_bam_info; } - resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (resource) { - pce_dev->ce_bam_info.bam_irq = resource->start; - } else { + pce_dev->ce_bam_info.bam_irq = platform_get_irq(pdev,0); + if (pce_dev->ce_bam_info.bam_irq < 0) { pr_err("CRYPTO BAM IRQ unavailable.\n"); goto err_dev; } diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c index d53e512aeb..3f9897187e 100644 --- a/crypto-qti/qcedev.c +++ b/crypto-qti/qcedev.c @@ -2256,7 +2256,7 @@ long qcedev_ioctl(struct file *file, err = -ENOTTY; goto exit_free_qcedev_areq; } - /* Fall-through */ + fallthrough; case QCEDEV_IOCTL_SHA_UPDATE_REQ: { struct scatterlist sg_src; @@ -2842,6 +2842,6 @@ static void qcedev_exit(void) MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("QTI DEV Crypto driver"); - +MODULE_IMPORT_NS(DMA_BUF); module_init(qcedev_init); module_exit(qcedev_exit); diff --git a/crypto-qti/qcrypto.c b/crypto-qti/qcrypto.c index d13e179080..402cac78c6 100644 --- a/crypto-qti/qcrypto.c +++ b/crypto-qti/qcrypto.c @@ -1388,6 +1388,8 @@ static int _qcrypto_check_aes_keylen(struct crypto_priv *cp, unsigned int len) case AES_KEYSIZE_192: if (cp->ce_support.aes_key_192) break; + else + return -EINVAL; default: //crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; @@ -3078,8 +3080,13 @@ static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key, case AES_KEYSIZE_256: break; case AES_KEYSIZE_192: - if (cp->ce_support.aes_key_192) + if (cp->ce_support.aes_key_192) { break; + } + else { + ctx->enc_key_len = 0; + return -EINVAL; + } default: ctx->enc_key_len = 0; //crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); diff --git a/hdcp/hdcp_qseecom.c b/hdcp/hdcp_qseecom.c index a3f8ebf2fb..6ed988d91c 100644 --- a/hdcp/hdcp_qseecom.c +++ b/hdcp/hdcp_qseecom.c @@ -1272,8 +1272,10 @@ int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd, break; case HDCP2_CMD_STOP: rc = hdcp2_app_stop(handle); + break; default: - goto error; + rc = -EINVAL; + break; } if (rc) diff --git a/smcinvoke/smcinvoke.c b/smcinvoke/smcinvoke.c index 46e3a5a19f..5aca50b447 100644 --- a/smcinvoke/smcinvoke.c +++ b/smcinvoke/smcinvoke.c @@ -882,14 +882,13 @@ static struct smcinvoke_cb_txn *find_cbtxn_locked( } /* - * size_add saturates at SIZE_MAX. If integer overflow is detected, + * size_add_ saturates at SIZE_MAX. If integer overflow is detected, * this function would return SIZE_MAX otherwise normal a+b is returned. */ -static inline size_t size_add(size_t a, size_t b) +static inline size_t size_add_(size_t a, size_t b) { return (b > (SIZE_MAX - a)) ? 
SIZE_MAX : a + b; } - /* * pad_size is used along with size_align to define a buffer overflow * protected version of ALIGN @@ -905,7 +904,7 @@ static inline size_t pad_size(size_t a, size_t b) */ static inline size_t size_align(size_t a, size_t b) { - return size_add(a, pad_size(a, b)); + return size_add_(a, pad_size(a, b)); } static uint16_t get_server_id(int cb_server_fd) @@ -1729,7 +1728,7 @@ static size_t compute_in_msg_size(const struct smcinvoke_cmd_req *req, /* each buffer has to be 8 bytes aligned */ while (i < OBJECT_COUNTS_NUM_buffers(req->counts)) - total_size = size_add(total_size, + total_size = size_add_(total_size, size_align(args_buf[i++].b.size, SMCINVOKE_ARGS_ALIGN_SIZE)); @@ -2802,3 +2801,4 @@ module_exit(smcinvoke_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("SMC Invoke driver"); MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver); +MODULE_IMPORT_NS(DMA_BUF); diff --git a/tz_log/tz_log.c b/tz_log/tz_log.c index 411104d2eb..ab6296d41a 100644 --- a/tz_log/tz_log.c +++ b/tz_log/tz_log.c @@ -1304,7 +1304,7 @@ static ssize_t tzdbg_fs_read(struct file *file, char __user *buf, static int tzdbg_procfs_open(struct inode *inode, struct file *file) { - return single_open(file, NULL, PDE_DATA(inode)); + return single_open(file, NULL, pde_data(inode)); } static int tzdbg_procfs_release(struct inode *inode, struct file *file) From 50845e2d72af4c6050c2e44e1a72335d076bb3c0 Mon Sep 17 00:00:00 2001 From: Spencer Willett Date: Tue, 11 Oct 2022 13:43:29 -0700 Subject: [PATCH 062/202] securemsm-kernel: smcinvoke: update error code for Invoke failed Return OBJECT_ERROR_BUSY if the Object is busy. Return OBJECT_ERROR_KMEM if Out of memory. Return OBJECT_ERROR_UNAVAIL if the request could not be processed. Change-Id: I17b9ecd7dd817b445d84cb7d01b019497248a2a3 --- smcinvoke/smcinvoke_kernel.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/smcinvoke/smcinvoke_kernel.c b/smcinvoke/smcinvoke_kernel.c index 96032544a6..fad59adf34 100644 --- a/smcinvoke/smcinvoke_kernel.c +++ b/smcinvoke/smcinvoke_kernel.c @@ -250,7 +250,14 @@ static int invoke_over_smcinvoke(void *cxt, close_fd(obj.fd); } } - ret = OBJECT_ERROR_KMEM; + if (ret == -EBUSY) { + ret = OBJECT_ERROR_BUSY; + } + else if (ret == -ENOMEM){ + ret = OBJECT_ERROR_KMEM; + } else { + ret = OBJECT_ERROR_UNAVAIL; + } goto exit; } From b5191f1c6101bb00b9e0d839d9550bbbe17f77a3 Mon Sep 17 00:00:00 2001 From: Spencer Willett Date: Fri, 14 Oct 2022 13:52:16 -0700 Subject: [PATCH 063/202] securemsm-kernel: Expose smcinvoke header files Using genrule and cc_library_headers we expose three smcinvoke headers. We use Android.bp edits to accomplish this. 
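On the invoke error-code mapping in patch 062 above, the translation it introduces is essentially the following. OBJECT_ERROR_BUSY/KMEM/UNAVAIL are the constants visible in that patch; the helper itself and its include are illustrative, since the real code performs this mapping inline in invoke_over_smcinvoke().

    #include <linux/errno.h>
    #include <linux/types.h>
    #include "smcinvoke_object.h"

    /* Map a kernel errno from the invoke path to an smcinvoke object error. */
    static int32_t demo_map_invoke_error(int err)
    {
        if (err == -EBUSY)
            return OBJECT_ERROR_BUSY;
        if (err == -ENOMEM)
            return OBJECT_ERROR_KMEM;
        return OBJECT_ERROR_UNAVAIL;
    }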
CRS-Fixed: 3317072 Change-Id: I1d946d5a23e22e3b525c99c7223e6109aab8c059 --- Android.bp | 39 ++++++++++++++++++++++++++++++++++++++- ssg_kernel_headers.py | 38 +++++++++++++++++++++++--------------- 2 files changed, 61 insertions(+), 16 deletions(-) diff --git a/Android.bp b/Android.bp index 3912ac5210..a53dcbbc40 100644 --- a/Android.bp +++ b/Android.bp @@ -1,5 +1,42 @@ +headers_src = [ + "linux/smc*ke.h", + "linux/smc*_object.h", + "linux/IClientE*v.h", +] + +smcinvoke_headers_out = [ + "linux/smcinvoke.h", + "linux/smcinvoke_object.h", + "linux/IClientEnv.h", +] + +smcinvoke_kernel_headers_verbose = "--verbose " + +genrule { + name: "qti_generate_smcinvoke_kernel_headers", + tools: ["headers_install.sh", + "unifdef" + ], + tool_files: [ + "ssg_kernel_headers.py", + ], + srcs: headers_src, + cmd: "python3 -u $(location ssg_kernel_headers.py) " + + smcinvoke_kernel_headers_verbose + + "--header_arch arm64 " + + "--gen_dir $(genDir) " + + "--smcinvoke_headers_to_expose $(locations linux/smc*ke.h) $(locations linux/smc*_object.h) $(locations linux/IClientE*v.h) " + + "--unifdef $(location unifdef) " + + "--headers_install $(location headers_install.sh)", + out: smcinvoke_headers_out, +} + + cc_library_headers { name: "smcinvoke_kernel_headers", - vendor_available: true, export_include_dirs: ["."], + generated_headers: ["qti_generate_smcinvoke_kernel_headers"], + export_generated_headers: ["qti_generate_smcinvoke_kernel_headers"], + vendor: true, + recovery_available: true } diff --git a/ssg_kernel_headers.py b/ssg_kernel_headers.py index 2285c65cbb..6708388a5c 100644 --- a/ssg_kernel_headers.py +++ b/ssg_kernel_headers.py @@ -1,4 +1,5 @@ # Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 as published by @@ -24,6 +25,10 @@ def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): print('error: expected prefix [%s] on header [%s]' % (prefix, h)) return False + # out_h is combining the relative path to the header file (made in gen_smcinvoke_headers()) to the gen_dir out/soong/.temp/sbox//out/ + # ex. out/soong/.temp/sbox//out/linux/smcinvoke.h + # After the build is complete, you can find the headers that you exposed located in the following gen path: + # out/soong/.intermediates/.../qti_generate_smcinvoke_kernel_headers/gen/ out_h = os.path.join(gen_dir, h[len(prefix):]) (out_h_dirname, out_h_basename) = os.path.split(out_h) env = os.environ.copy() @@ -40,17 +45,19 @@ def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h): return False return True -def gen_audio_headers(verbose, gen_dir, headers_install, unifdef, audio_include_uapi): +def gen_smcinvoke_headers(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose): error_count = 0 - for h in audio_include_uapi: - audio_uapi_include_prefix = os.path.join(h.split('/include/uapi/')[0], - 'include', - 'uapi', - 'audio') + os.sep - - if not run_headers_install( - verbose, gen_dir, headers_install, unifdef, - audio_uapi_include_prefix, h): error_count += 1 + # smcinvoke_headers_to_expose is a string list of individual paths to headers to expose + # They are passed using Android.bp variable substition: $(locations