securemsm-kernel: Enable DLKMs from vendor

SSG's kernel modules will be loaded through userspace
init instead of kernel.

Change-Id: Ibf88a6a56df65a933d451d39136060967595e5b7
This commit is contained in:
Smita Ghosh
2021-11-06 17:21:49 -07:00
committed by Bruce Levy
parent 61bb176ca2
commit e190b86556
42 changed files with 24477 additions and 0 deletions

5
Android.bp Normal file
View File

@@ -0,0 +1,5 @@
// Exports this directory's headers so vendor components can compile
// against the smcinvoke kernel interface.
cc_library_headers {
name: "smcinvoke_kernel_headers",
vendor_available: true,
export_include_dirs: ["."],
}

65
Android.mk Normal file
View File

@@ -0,0 +1,65 @@
# Android makefile for the securemsm (SSG) kernel modules, built as
# vendor DLKMs through the common Qualcomm dlkm wrapper makefiles.
# (Previous header wrongly said "audio kernel modules".)
LOCAL_PATH := $(call my-dir)
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
# All sources up to four directory levels deep; listed as
# LOCAL_SRC_FILES on every module so a change to any of them
# re-triggers the DLKM build.
SSG_SRC_FILES := \
	$(wildcard $(LOCAL_PATH)/*) \
	$(wildcard $(LOCAL_PATH)/*/*) \
	$(wildcard $(LOCAL_PATH)/*/*/*) \
	$(wildcard $(LOCAL_PATH)/*/*/*/*)
################### smcinvoke ####################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := smcinvoke_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := smcinvoke_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
#################### tz_log ######################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := tz_log_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := tz_log_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
##################### qce50 ######################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qce50_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qce50_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
#################### qcedev ######################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qcedev-mod_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qcedev-mod_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
#################### qcrypto #####################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qcrypto-msm_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qcrypto-msm_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk

17
Kbuild Normal file
View File

@@ -0,0 +1,17 @@
# Kbuild for the securemsm DLKMs. The conf file exports the CONFIG_*
# switches (=m) consumed when building the objects below.
include $(SSG_MODULE_ROOT)/config/ssg_smcinvoke.conf
# SMC invoke driver (TrustZone object invocation).
obj-m += smcinvoke_dlkm.o
smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o
# TZ diagnostics log driver.
obj-m += tz_log_dlkm.o
tz_log_dlkm-objs := tz_log/tz_log.o
# Crypto engine (QCE) core driver.
obj-m += qce50_dlkm.o
qce50_dlkm-objs := crypto-qti/qce50.o
# Userspace crypto device driver, with SMMU and 32-bit compat support.
obj-m += qcedev-mod_dlkm.o
qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o crypto-qti/compat_qcedev.o
# Kernel crypto API offload driver.
obj-m += qcrypto-msm_dlkm.o
qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o crypto-qti/des.o

13
Makefile Normal file
View File

@@ -0,0 +1,13 @@
# Out-of-tree module wrapper: every goal is delegated to the kernel
# build system (KERNEL_SRC) with M pointing at this directory.
M=$(PWD)
# NOTE(review): M is already absolute here, so $(KERNEL_SRC)/$(M) only
# resolves when the kernel build invokes this with a relative M —
# confirm against the platform's module build flow.
SSG_MODULE_ROOT=$(KERNEL_SRC)/$(M)
# Propagated so Kbuild can include config/ssg_smcinvoke.conf.
KBUILD_OPTIONS+= SSG_MODULE_ROOT=$(SSG_MODULE_ROOT)
# Default goal; "modules" is served by the match-anything rule below.
all: modules
clean:
$(MAKE) -C $(KERNEL_SRC) M=$(M) clean
# Forward any other goal (modules, modules_install, ...) to kbuild.
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)

View File

@@ -0,0 +1,5 @@
# Kbuild fragment for the securemsm devicetree overlay blob.
dtbo-y +=securemsm-kernel.dtbo
# Build all listed dtb/dtbo blobs by default; clean generated blobs.
always-y := $(dtb-y) $(dtbo-y)
subdir-y := $(dts-dirs)
clean-files := *.dtb *.dtbo

View File

@@ -0,0 +1,72 @@
/dts-v1/;
/plugin/;
#include <dt-bindings/clock/qcom,aop-qmp.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include <dt-bindings/interconnect/qcom,waipio.h>
#include <dt-bindings/soc/qcom,ipcc.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/soc/qcom,ipcc.h>
#include <dt-bindings/spmi/spmi.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
/* Reusable CMA carve-outs ("shared-dma-pool") used by the secure
 * world; all are 4 MB aligned and allocatable anywhere in the low
 * 32-bit range.
 */
&reserved_memory {
/* 16 MB pool for contiguous user allocations. */
user_contig_mem: user_contig_region {
compatible = "shared-dma-pool";
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
size = <0x0 0x1000000>;
};
/* 20 MB pool backing the qseecom DMA heap below. */
qseecom_mem: qseecom_region {
compatible = "shared-dma-pool";
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
size = <0x0 0x1400000>;
};
/* 16 MB pool backing the qseecom-ta DMA heap below. */
qseecom_ta_mem: qseecom_ta_region {
compatible = "shared-dma-pool";
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
size = <0x0 0x1000000>;
};
};
&firmware {
/* Matches the smcinvoke_dlkm driver. */
qcom_smcinvoke {
compatible = "qcom,smcinvoke";
};
/* TZ diagnostics log region for the tz_log_dlkm driver; offsets
 * locate the hypervisor log address/size within the region.
 */
qcom_tzlog: tz-log@146AA720 {
compatible = "qcom,tz-log";
reg = <0x146AA720 0x3000>;
qcom,hyplog-enabled;
hyplog-address-offset = <0x410>;
hyplog-size-offset = <0x414>;
};
/* DMA heaps exposed to userspace, backed by the CMA regions above. */
qcom,dma-heaps {
qcom,qseecom {
qcom,dma-heap-name = "qcom,qseecom";
qcom,dma-heap-type = <HEAP_TYPE_CMA>;
memory-region = <&qseecom_mem>;
};
qcom,qseecom_ta {
qcom,dma-heap-name = "qcom,qseecom-ta";
qcom,dma-heap-type = <HEAP_TYPE_CMA>;
memory-region = <&qseecom_ta_mem>;
};
};
};

View File

@@ -0,0 +1,7 @@
# Build switches for the securemsm DLKMs; each driver is built as a
# loadable module (=m). Consumed via the include in Kbuild.
export CONFIG_QCOM_SMCINVOKE=m
export CONFIG_QTI_TZ_LOG=m
export CONFIG_CRYPTO_DEV_QCEDEV=m
export CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
export CONFIG_CRYPTO_DEV_QCRYPTO=m
export CONFIG_SCSI_UFS_CRYPTO=m
export CONFIG_SCSI_UFS_CRYPTO_QTI=m

535
crypto-qti/compat_qcedev.c Normal file
View File

@@ -0,0 +1,535 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI CE 32-bit compatibility syscall for 64-bit systems
*
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "linux/qcedev.h"
#include <linux/compat.h>
#include "compat_qcedev.h"
/*
 * Local stand-in for the kernel's removed compat_alloc_user_space()
 * helper. NOTE(review): it always returns NULL, so every compat ioctl
 * conversion path below bails out with -EFAULT/-EINVAL — confirm this
 * disabled state is intended or supply a real scratch allocation.
 */
static void *compat_alloc_user_space(int size){
return NULL;
}
/*
 * Copy a 32-bit userspace qcedev_pmem_info into its native layout.
 * Both arguments point at userspace memory; returns the OR of all
 * get_user/put_user results (0 on success).
 */
static int compat_get_qcedev_pmem_info(
		struct compat_qcedev_pmem_info __user *pmem32,
		struct qcedev_pmem_info __user *pmem)
{
	compat_ulong_t off;
	compat_int_t fd;
	uint32_t buflen;
	int ret, k;

	/* Source fd and source buffer descriptors. */
	ret = get_user(fd, &pmem32->fd_src);
	ret |= put_user(fd, &pmem->fd_src);
	for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
		ret |= get_user(off, &pmem32->src[k].offset);
		ret |= put_user(off, &pmem->src[k].offset);
		ret |= get_user(buflen, &pmem32->src[k].len);
		ret |= put_user(buflen, &pmem->src[k].len);
	}

	/* Destination fd and destination buffer descriptors. */
	ret |= get_user(fd, &pmem32->fd_dst);
	ret |= put_user(fd, &pmem->fd_dst);
	for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
		ret |= get_user(off, &pmem32->dst[k].offset);
		ret |= put_user(off, &pmem->dst[k].offset);
		ret |= get_user(buflen, &pmem32->dst[k].len);
		ret |= put_user(buflen, &pmem->dst[k].len);
	}

	return ret;
}
/*
 * Copy a native userspace qcedev_pmem_info back into its 32-bit
 * (compat) layout — the inverse of compat_get_qcedev_pmem_info().
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_put_qcedev_pmem_info(
		struct compat_qcedev_pmem_info __user *pmem32,
		struct qcedev_pmem_info __user *pmem)
{
	compat_ulong_t off;
	compat_int_t fd;
	uint32_t buflen;
	int ret, k;

	/* Source fd and source buffer descriptors. */
	ret = get_user(fd, &pmem->fd_src);
	ret |= put_user(fd, &pmem32->fd_src);
	for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
		ret |= get_user(off, &pmem->src[k].offset);
		ret |= put_user(off, &pmem32->src[k].offset);
		ret |= get_user(buflen, &pmem->src[k].len);
		ret |= put_user(buflen, &pmem32->src[k].len);
	}

	/* Destination fd and destination buffer descriptors. */
	ret |= get_user(fd, &pmem->fd_dst);
	ret |= put_user(fd, &pmem32->fd_dst);
	for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
		ret |= get_user(off, &pmem->dst[k].offset);
		ret |= put_user(off, &pmem32->dst[k].offset);
		ret |= get_user(buflen, &pmem->dst[k].len);
		ret |= put_user(buflen, &pmem32->dst[k].len);
	}

	return ret;
}
/*
 * Copy a 32-bit userspace qcedev_vbuf_info into its native layout.
 * The native vaddr union member is written through a compat_uptr_t
 * cast, so only the 32-bit pointer value is transferred.
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_get_qcedev_vbuf_info(
struct compat_qcedev_vbuf_info __user *vbuf32,
struct qcedev_vbuf_info __user *vbuf)
{
compat_uptr_t vaddr;
int err = 0, i;
uint32_t len;
/* Source buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &vbuf32->src[i].vaddr);
err |= put_user(vaddr,
(compat_uptr_t __user *)&vbuf->src[i].vaddr);
err |= get_user(len, &vbuf32->src[i].len);
err |= put_user(len, &vbuf->src[i].len);
}
/* Destination buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
err |= put_user(vaddr,
(compat_uptr_t __user *)&vbuf->dst[i].vaddr);
err |= get_user(len, &vbuf32->dst[i].len);
err |= put_user(len, &vbuf->dst[i].len);
}
return err;
}
/*
 * Copy a native userspace qcedev_vbuf_info back into its 32-bit
 * (compat) layout — the inverse of compat_get_qcedev_vbuf_info().
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_put_qcedev_vbuf_info(
struct compat_qcedev_vbuf_info __user *vbuf32,
struct qcedev_vbuf_info __user *vbuf)
{
compat_uptr_t vaddr;
int err = 0, i;
uint32_t len;
/* Source buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr,
(compat_uptr_t __user *)&vbuf->src[i].vaddr);
err |= put_user(vaddr, &vbuf32->src[i].vaddr);
err |= get_user(len, &vbuf->src[i].len);
err |= put_user(len, &vbuf32->src[i].len);
}
/* Destination buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr,
(compat_uptr_t __user *)&vbuf->dst[i].vaddr);
err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
err |= get_user(len, &vbuf->dst[i].len);
err |= put_user(len, &vbuf32->dst[i].len);
}
return err;
}
/*
 * Convert a 32-bit userspace cipher request into the native layout,
 * field by field: buffer info (pmem or vbuf, chosen by use_pmem),
 * lengths, key, IV, and the alg/mode/op enums.
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_get_qcedev_cipher_op_req(
struct compat_qcedev_cipher_op_req __user *data32,
struct qcedev_cipher_op_req __user *data)
{
enum qcedev_cipher_mode_enum mode;
enum qcedev_cipher_alg_enum alg;
compat_ulong_t byteoffset;
enum qcedev_oper_enum op;
compat_ulong_t data_len;
compat_ulong_t encklen;
compat_ulong_t entries;
compat_ulong_t ivlen;
uint8_t in_place_op;
int err, i;
uint8_t use_pmem;
uint8_t enckey;
uint8_t iv;
/* use_pmem selects which union member is populated. */
err = get_user(use_pmem, &data32->use_pmem);
err |= put_user(use_pmem, &data->use_pmem);
if (use_pmem)
err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
else
err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
err |= get_user(entries, &data32->entries);
err |= put_user(entries, &data->entries);
err |= get_user(data_len, &data32->data_len);
err |= put_user(data_len, &data->data_len);
err |= get_user(in_place_op, &data32->in_place_op);
err |= put_user(in_place_op, &data->in_place_op);
/* Encryption key bytes. */
for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
err |= get_user(enckey, &(data32->enckey[i]));
err |= put_user(enckey, &(data->enckey[i]));
}
err |= get_user(encklen, &data32->encklen);
err |= put_user(encklen, &data->encklen);
/* Initialization vector bytes. */
for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
err |= get_user(iv, &(data32->iv[i]));
err |= put_user(iv, &(data->iv[i]));
}
err |= get_user(ivlen, &data32->ivlen);
err |= put_user(ivlen, &data->ivlen);
err |= get_user(byteoffset, &data32->byteoffset);
err |= put_user(byteoffset, &data->byteoffset);
err |= get_user(alg, &data32->alg);
err |= put_user(alg, &data->alg);
err |= get_user(mode, &data32->mode);
err |= put_user(mode, &data->mode);
err |= get_user(op, &data32->op);
err |= put_user(op, &data->op);
return err;
}
/*
 * Convert a native cipher request back to the 32-bit userspace layout
 * after the ioctl has run — the inverse of
 * compat_get_qcedev_cipher_op_req() (copies results such as the
 * updated IV back to the 32-bit caller).
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_put_qcedev_cipher_op_req(
struct compat_qcedev_cipher_op_req __user *data32,
struct qcedev_cipher_op_req __user *data)
{
enum qcedev_cipher_mode_enum mode;
enum qcedev_cipher_alg_enum alg;
compat_ulong_t byteoffset;
enum qcedev_oper_enum op;
compat_ulong_t data_len;
compat_ulong_t encklen;
compat_ulong_t entries;
compat_ulong_t ivlen;
uint8_t in_place_op;
int err, i;
uint8_t use_pmem;
uint8_t enckey;
uint8_t iv;
/* use_pmem selects which union member is copied back. */
err = get_user(use_pmem, &data->use_pmem);
err |= put_user(use_pmem, &data32->use_pmem);
if (use_pmem)
err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
else
err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
err |= get_user(entries, &data->entries);
err |= put_user(entries, &data32->entries);
err |= get_user(data_len, &data->data_len);
err |= put_user(data_len, &data32->data_len);
err |= get_user(in_place_op, &data->in_place_op);
err |= put_user(in_place_op, &data32->in_place_op);
/* Encryption key bytes. */
for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
err |= get_user(enckey, &(data->enckey[i]));
err |= put_user(enckey, &(data32->enckey[i]));
}
err |= get_user(encklen, &data->encklen);
err |= put_user(encklen, &data32->encklen);
/* IV bytes (driver-updated; see qcedev_cipher_op_req docs). */
for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
err |= get_user(iv, &(data->iv[i]));
err |= put_user(iv, &(data32->iv[i]));
}
err |= get_user(ivlen, &data->ivlen);
err |= put_user(ivlen, &data32->ivlen);
err |= get_user(byteoffset, &data->byteoffset);
err |= put_user(byteoffset, &data32->byteoffset);
err |= get_user(alg, &data->alg);
err |= put_user(alg, &data32->alg);
err |= get_user(mode, &data->mode);
err |= put_user(mode, &data32->mode);
err |= get_user(op, &data->op);
err |= put_user(op, &data32->op);
return err;
}
/*
 * Transfer a qcedev_map_buf_req between its compat (32-bit) and native
 * userspace layouts. to_get == true copies compat -> native (before
 * the ioctl); false copies native -> compat (results back).
 * NOTE(review): buf_vaddr is staged through a 32-bit temporary while
 * the compat field is declared compat_u64 — the upper 32 bits are
 * dropped; confirm mapped addresses for 32-bit clients always fit.
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_xfer_qcedev_map_buf_req(
struct compat_qcedev_map_buf_req __user *data32,
struct qcedev_map_buf_req __user *data, bool to_get)
{
int rc = 0, i, fd = -1;
uint32_t fd_size, fd_offset, num_fds, buf_vaddr;
if (to_get) {
/* copy from compat struct */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
rc |= get_user(fd, &data32->fd[i]);
rc |= put_user(fd, &data->fd[i]);
rc |= get_user(fd_size, &data32->fd_size[i]);
rc |= put_user(fd_size, &data->fd_size[i]);
rc |= get_user(fd_offset, &data32->fd_offset[i]);
rc |= put_user(fd_offset, &data->fd_offset[i]);
rc |= get_user(buf_vaddr, &data32->buf_vaddr[i]);
rc |= put_user(buf_vaddr, &data->buf_vaddr[i]);
}
rc |= get_user(num_fds, &data32->num_fds);
rc |= put_user(num_fds, &data->num_fds);
} else {
/* copy to compat struct */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
rc |= get_user(fd, &data->fd[i]);
rc |= put_user(fd, &data32->fd[i]);
rc |= get_user(fd_size, &data->fd_size[i]);
rc |= put_user(fd_size, &data32->fd_size[i]);
rc |= get_user(fd_offset, &data->fd_offset[i]);
rc |= put_user(fd_offset, &data32->fd_offset[i]);
rc |= get_user(buf_vaddr, &data->buf_vaddr[i]);
rc |= put_user(buf_vaddr, &data32->buf_vaddr[i]);
}
rc |= get_user(num_fds, &data->num_fds);
rc |= put_user(num_fds, &data32->num_fds);
}
return rc;
}
/*
 * Transfer a qcedev_unmap_buf_req between its compat (32-bit) and
 * native userspace layouts. to_get == true copies compat -> native;
 * false copies native -> compat.
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_xfer_qcedev_unmap_buf_req(
		struct compat_qcedev_unmap_buf_req __user *data32,
		struct qcedev_unmap_buf_req __user *data, bool to_get)
{
	uint32_t nfds;
	int k, ret = 0, fd = -1;

	if (to_get) {
		/* compat layout -> native layout */
		for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
			ret |= get_user(fd, &data32->fd[k]);
			ret |= put_user(fd, &data->fd[k]);
		}
		ret |= get_user(nfds, &data32->num_fds);
		ret |= put_user(nfds, &data->num_fds);
	} else {
		/* native layout -> compat layout */
		for (k = 0; k < QCEDEV_MAX_BUFFERS; k++) {
			ret |= get_user(fd, &data->fd[k]);
			ret |= put_user(fd, &data32->fd[k]);
		}
		ret |= get_user(nfds, &data->num_fds);
		ret |= put_user(nfds, &data32->num_fds);
	}

	return ret;
}
/*
 * Convert a 32-bit userspace SHA/HMAC request into the native layout:
 * data buffer descriptors, digest bytes, auth key pointer, and alg.
 * Pointer-valued fields are written through compat_uptr_t casts, so
 * only 32-bit pointer values are transferred.
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_get_qcedev_sha_op_req(
struct compat_qcedev_sha_op_req __user *data32,
struct qcedev_sha_op_req __user *data)
{
enum qcedev_sha_alg_enum alg;
compat_ulong_t authklen;
compat_ulong_t data_len;
compat_ulong_t entries;
compat_ulong_t diglen;
compat_uptr_t authkey;
compat_uptr_t vaddr;
int err = 0, i;
uint8_t digest;
uint32_t len;
/* Input data buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr, &data32->data[i].vaddr);
err |= put_user(vaddr,
(compat_uptr_t __user *)&data->data[i].vaddr);
err |= get_user(len, &data32->data[i].len);
err |= put_user(len, &data->data[i].len);
}
err |= get_user(entries, &data32->entries);
err |= put_user(entries, &data->entries);
err |= get_user(data_len, &data32->data_len);
err |= put_user(data_len, &data->data_len);
/* Digest bytes (in/out area). */
for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
err |= get_user(digest, &(data32->digest[i]));
err |= put_user(digest, &(data->digest[i]));
}
err |= get_user(diglen, &data32->diglen);
err |= put_user(diglen, &data->diglen);
err |= get_user(authkey, &data32->authkey);
err |= put_user(authkey, (compat_uptr_t __user *)&data->authkey);
err |= get_user(authklen, &data32->authklen);
err |= put_user(authklen, &data->authklen);
err |= get_user(alg, &data32->alg);
err |= put_user(alg, &data->alg);
return err;
}
/*
 * Convert a native SHA/HMAC request back to the 32-bit userspace
 * layout after the ioctl has run (copies the computed digest and
 * updated lengths back to the 32-bit caller) — the inverse of
 * compat_get_qcedev_sha_op_req().
 * Returns the OR of all get_user/put_user results (0 on success).
 */
static int compat_put_qcedev_sha_op_req(
struct compat_qcedev_sha_op_req __user *data32,
struct qcedev_sha_op_req __user *data)
{
enum qcedev_sha_alg_enum alg;
compat_ulong_t authklen;
compat_ulong_t data_len;
compat_ulong_t entries;
compat_ulong_t diglen;
compat_uptr_t authkey;
compat_uptr_t vaddr;
int err = 0, i;
uint8_t digest;
uint32_t len;
/* Input data buffer descriptors. */
for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
err |= get_user(vaddr,
(compat_uptr_t __user *)&data->data[i].vaddr);
err |= put_user(vaddr, &data32->data[i].vaddr);
err |= get_user(len, &data->data[i].len);
err |= put_user(len, &data32->data[i].len);
}
err |= get_user(entries, &data->entries);
err |= put_user(entries, &data32->entries);
err |= get_user(data_len, &data->data_len);
err |= put_user(data_len, &data32->data_len);
/* Digest bytes (result of the operation). */
for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
err |= get_user(digest, &(data->digest[i]));
err |= put_user(digest, &(data32->digest[i]));
}
err |= get_user(diglen, &data->diglen);
err |= put_user(diglen, &data32->diglen);
err |= get_user(authkey,
(compat_uptr_t __user *)&data->authkey);
err |= put_user(authkey, &data32->authkey);
err |= get_user(authklen, &data->authklen);
err |= put_user(authklen, &data32->authklen);
err |= get_user(alg, &data->alg);
err |= put_user(alg, &data32->alg);
return err;
}
static unsigned int convert_cmd(unsigned int cmd)
{
switch (cmd) {
case COMPAT_QCEDEV_IOCTL_ENC_REQ:
return QCEDEV_IOCTL_ENC_REQ;
case COMPAT_QCEDEV_IOCTL_DEC_REQ:
return QCEDEV_IOCTL_DEC_REQ;
case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
return QCEDEV_IOCTL_SHA_INIT_REQ;
case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
return QCEDEV_IOCTL_SHA_UPDATE_REQ;
case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
return QCEDEV_IOCTL_SHA_FINAL_REQ;
case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
return QCEDEV_IOCTL_GET_SHA_REQ;
case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
return QCEDEV_IOCTL_GET_CMAC_REQ;
case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ:
return QCEDEV_IOCTL_MAP_BUF_REQ;
case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ:
return QCEDEV_IOCTL_UNMAP_BUF_REQ;
default:
return cmd;
}
}
/*
 * 32-bit compat entry point for the qcedev ioctls. Each supported
 * command converts the compat request layout into a native-layout
 * scratch user buffer, forwards to qcedev_ioctl(), then copies the
 * results back to the 32-bit caller. The ioctl status takes priority
 * over any copy-back error.
 *
 * NOTE(review): compat_alloc_user_space() above is a stub returning
 * NULL, so every path here currently fails — see that function.
 *
 * Fixes vs. original: removed unreachable `break;` statements after
 * `return`, and unified the alloc-failure error code to -EFAULT (the
 * MAP/UNMAP cases previously returned -EINVAL for the same failure).
 */
long compat_qcedev_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
	case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
		struct compat_qcedev_cipher_op_req __user *data32;
		struct qcedev_cipher_op_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EFAULT;
		err = compat_get_qcedev_cipher_op_req(data32, data);
		if (err)
			return err;
		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_put_qcedev_cipher_op_req(data32, data);
		return ret ? ret : err;
	}
	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
		struct compat_qcedev_sha_op_req __user *data32;
		struct qcedev_sha_op_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EFAULT;
		err = compat_get_qcedev_sha_op_req(data32, data);
		if (err)
			return err;
		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_put_qcedev_sha_op_req(data32, data);
		return ret ? ret : err;
	}
	case COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ: {
		struct compat_qcedev_map_buf_req __user *data32;
		struct qcedev_map_buf_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EFAULT;	/* was -EINVAL; align with other cases */
		err = compat_xfer_qcedev_map_buf_req(data32, data, true);
		if (err)
			return err;
		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_xfer_qcedev_map_buf_req(data32, data, false);
		return ret ? ret : err;
	}
	case COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ: {
		struct compat_qcedev_unmap_buf_req __user *data32;
		struct qcedev_unmap_buf_req __user *data;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data)
			return -EFAULT;	/* was -EINVAL; align with other cases */
		err = compat_xfer_qcedev_unmap_buf_req(data32, data, true);
		if (err)
			return err;
		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
		err = compat_xfer_qcedev_unmap_buf_req(data32, data, false);
		return ret ? ret : err;
	}
	default:
		return -ENOIOCTLCMD;
	}
	return 0;	/* not reached; kept for compilers that can't prove it */
}

202
crypto-qti/compat_qcedev.h Normal file
View File

@@ -0,0 +1,202 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014,2017-2020, The Linux Foundation. All rights reserved.
 */
/*
 * 32-bit (compat) mirror of the qcedev UAPI structures. Each struct
 * here must keep the exact memory layout a 32-bit userspace process
 * sees for the corresponding native struct in linux/qcedev.h.
 */
#ifndef _UAPI_COMPAT_QCEDEV__H
#define _UAPI_COMPAT_QCEDEV__H
#include <linux/types.h>
#include <linux/ioctl.h>
#if IS_ENABLED(CONFIG_COMPAT)
#include <linux/compat.h>
/**
 * struct compat_buf_info - Buffer information
 * @offset: Offset from the base address of the buffer
 * (Used when buffer is allocated using PMEM)
 * @vaddr: Virtual buffer address pointer
 * @len: Size of the buffer
 */
struct compat_buf_info {
union {
compat_ulong_t offset;
compat_uptr_t vaddr;
};
compat_ulong_t len;
};
/**
 * struct compat_qcedev_vbuf_info - Source and destination Buffer information
 * @src: Array of buf_info for input/source
 * @dst: Array of buf_info for output/destination
 */
struct compat_qcedev_vbuf_info {
struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
 * struct compat_qcedev_pmem_info - Stores PMEM buffer information
 * @fd_src: Handle to /dev/adsp_pmem used to allocate
 * memory for input/src buffer
 * @src: Array of buf_info for input/source
 * @fd_dst: Handle to /dev/adsp_pmem used to allocate
 * memory for output/dst buffer
 * @dst: Array of buf_info for output/destination
 * @pmem_src_offset: The offset from input/src buffer
 * (allocated by PMEM)
 */
struct compat_qcedev_pmem_info {
compat_int_t fd_src;
struct compat_buf_info src[QCEDEV_MAX_BUFFERS];
compat_int_t fd_dst;
struct compat_buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
 * struct compat_qcedev_cipher_op_req - Holds the ciphering request information
 * @use_pmem (IN): Flag to indicate if buffer source is PMEM
 * QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
 * @pmem (IN): Stores PMEM buffer information.
 * Refer struct qcedev_pmem_info
 * @vbuf (IN/OUT): Stores Source and destination Buffer information
 * Refer to struct qcedev_vbuf_info
 * @data_len (IN): Total Length of input/src and output/dst in bytes
 * @in_place_op (IN): Indicates whether the operation is inplace where
 * source == destination
 * When using PMEM allocated memory, must set this to 1
 * @enckey (IN): 128 bits of confidentiality key
 * enckey[0] bit 127-120, enckey[1] bit 119-112,..
 * enckey[15] bit 7-0
 * @encklen (IN): Length of the encryption key(set to 128 bits/16
 * bytes in the driver)
 * @iv (IN/OUT): Initialization vector data
 * This is updated by the driver, incremented by
 * number of blocks encrypted/decrypted.
 * @ivlen (IN): Length of the IV
 * @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
 * for AES-128 CTR mode only)
 * @alg (IN): Type of ciphering algorithm: AES/DES/3DES
 * @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR
 * Applicable when using AES algorithm only
 * @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
 * QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
 *
 * If use_pmem is set to 0, the driver assumes that memory was not allocated
 * via PMEM, and kernel will need to allocate memory and copy data from user
 * space buffer (data_src/dta_dst) and process accordingly and copy data back
 * to the user space buffer
 *
 * If use_pmem is set to 1, the driver assumes that memory was allocated via
 * PMEM.
 * The kernel driver will use the fd_src to determine the kernel virtual address
 * base that maps to the user space virtual address base for the buffer
 * allocated in user space.
 * The final input/src and output/dst buffer pointer will be determined
 * by adding the offsets to the kernel virtual addr.
 *
 * If use of hardware key is supported in the target, user can configure the
 * key parameters (encklen, enckey) to use the hardware key.
 * In order to use the hardware key, set encklen to 0 and set the enckey
 * data array to 0.
 */
struct compat_qcedev_cipher_op_req {
uint8_t use_pmem;
union {
struct compat_qcedev_pmem_info pmem;
struct compat_qcedev_vbuf_info vbuf;
};
compat_ulong_t entries;
compat_ulong_t data_len;
uint8_t in_place_op;
uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
compat_ulong_t encklen;
uint8_t iv[QCEDEV_MAX_IV_SIZE];
compat_ulong_t ivlen;
compat_ulong_t byteoffset;
enum qcedev_cipher_alg_enum alg;
enum qcedev_cipher_mode_enum mode;
enum qcedev_oper_enum op;
};
/**
 * struct compat_qcedev_sha_op_req - Holds the hashing request information
 * @data (IN): Array of pointers to the data to be hashed
 * @entries (IN): Number of buf_info entries in the data array
 * @data_len (IN): Length of data to be hashed
 * @digest (IN/OUT): Returns the hashed data information
 * @diglen (OUT): Size of the hashed/digest data
 * @authkey (IN): Pointer to authentication key for HMAC
 * @authklen (IN): Size of the authentication key
 * @alg (IN): Secure Hash algorithm
 */
struct compat_qcedev_sha_op_req {
struct compat_buf_info data[QCEDEV_MAX_BUFFERS];
compat_ulong_t entries;
compat_ulong_t data_len;
uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
compat_ulong_t diglen;
compat_uptr_t authkey;
compat_ulong_t authklen;
enum qcedev_sha_alg_enum alg;
};
/**
 * struct compat_qcedev_map_buf_req - Holds the mapping request information
 * fd (IN): Array of fds.
 * num_fds (IN): Number of fds in fd[].
 * fd_size (IN): Array of sizes corresponding to each fd in fd[].
 * fd_offset (IN): Array of offset corresponding to each fd in fd[].
 * buf_vaddr (OUT): Array of mapped virtual address corresponding to
 * each fd in fd[].
 */
struct compat_qcedev_map_buf_req {
compat_long_t fd[QCEDEV_MAX_BUFFERS];
compat_ulong_t num_fds;
compat_ulong_t fd_size[QCEDEV_MAX_BUFFERS];
compat_ulong_t fd_offset[QCEDEV_MAX_BUFFERS];
compat_u64 buf_vaddr[QCEDEV_MAX_BUFFERS];
};
/**
 * struct compat_qcedev_unmap_buf_req - Holds the unmapping request information
 * fd (IN): Array of fds to unmap
 * num_fds (IN): Number of fds in fd[].
 */
struct compat_qcedev_unmap_buf_req {
compat_long_t fd[QCEDEV_MAX_BUFFERS];
compat_ulong_t num_fds;
};
struct file;
/* Native ioctl handler (defined in qcedev.c). */
long qcedev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
/* Compat ioctl entry point (defined in compat_qcedev.c). */
long compat_qcedev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
/* Compat ioctl numbers: same magic/sequence as the native commands,
 * but sized against the compat struct layouts above.
 */
#define COMPAT_QCEDEV_IOCTL_ENC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
#define COMPAT_QCEDEV_IOCTL_DEC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
#define COMPAT_QCEDEV_IOCTL_LOCK_CE \
_IO(QCEDEV_IOC_MAGIC, 7)
#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE \
_IO(QCEDEV_IOC_MAGIC, 8)
#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
#define COMPAT_QCEDEV_IOCTL_MAP_BUF_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 10, struct compat_qcedev_map_buf_req)
#define COMPAT_QCEDEV_IOCTL_UNMAP_BUF_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 11, struct compat_qcedev_unmap_buf_req)
#endif /* CONFIG_COMPAT */
#endif /* _UAPI_COMPAT_QCEDEV__H */

View File

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */
#ifndef _FIPS_STATUS__H
#define _FIPS_STATUS__H
#include <linux/types.h>
#include <linux/ioctl.h>
/**
 * fips_status: global FIPS140-2 status
 * @FIPS140_STATUS_NA:
 * Not a FIPS140-2 compliant build.
 * The flag status won't change
 * throughout the lifetime.
 * @FIPS140_STATUS_PASS_CRYPTO:
 * KAT self tests have passed.
 * @FIPS140_STATUS_QCRYPTO_ALLOWED:
 * Integrity test has passed.
 * @FIPS140_STATUS_PASS:
 * All tests have passed and the build
 * is in FIPS140-2 mode.
 * @FIPS140_STATUS_FAIL:
 * One of the tests failed.
 * This will block all requests
 * to crypto modules.
 */
enum fips_status {
FIPS140_STATUS_NA = 0,
FIPS140_STATUS_PASS_CRYPTO = 1,
FIPS140_STATUS_QCRYPTO_ALLOWED = 2,
FIPS140_STATUS_PASS = 3,
FIPS140_STATUS_FAIL = 0xFF
};
#endif /* _FIPS_STATUS__H */

View File

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __QCOM_CRYPTO_DEVICE__H
#define __QCOM_CRYPTO_DEVICE__H
#include <linux/types.h>
/*
 * Capability flags describing the platform's crypto engine (CE)
 * hardware support. NOTE(review): field semantics are inferred from
 * their names (shared CE, hardware key, SHA-HMAC support) — confirm
 * against the platform data that populates this struct.
 */
struct msm_ce_hw_support {
uint32_t ce_shared;
uint32_t shared_ce_resource;
uint32_t hw_key_support;
uint32_t sha_hmac;
};
#endif /* __QCOM_CRYPTO_DEVICE__H */

289
crypto-qti/linux/qcedev.h Normal file
View File

@@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef _QCEDEV__H
#define _QCEDEV__H
#include <linux/types.h>
#include <linux/ioctl.h>
#include "fips_status.h"
#define QCEDEV_MAX_SHA_BLOCK_SIZE 64
#define QCEDEV_MAX_BEARER 31
#define QCEDEV_MAX_KEY_SIZE 64
#define QCEDEV_MAX_IV_SIZE 32
#define QCEDEV_MAX_BUFFERS 16
#define QCEDEV_MAX_SHA_DIGEST 32
#define QCEDEV_USE_PMEM 1
#define QCEDEV_NO_PMEM 0
#define QCEDEV_AES_KEY_128 16
#define QCEDEV_AES_KEY_192 24
#define QCEDEV_AES_KEY_256 32
/**
*qcedev_oper_enum: Operation types
* @QCEDEV_OPER_ENC: Encrypt
* @QCEDEV_OPER_DEC: Decrypt
* @QCEDEV_OPER_ENC_NO_KEY: Encrypt. Do not need key to be specified by
* user. Key already set by an external processor.
* @QCEDEV_OPER_DEC_NO_KEY: Decrypt. Do not need the key to be specified by
* user. Key already set by an external processor.
*/
enum qcedev_oper_enum {
QCEDEV_OPER_DEC = 0,
QCEDEV_OPER_ENC = 1,
QCEDEV_OPER_DEC_NO_KEY = 2,
QCEDEV_OPER_ENC_NO_KEY = 3,
QCEDEV_OPER_LAST
};
/**
*qcedev_oper_enum: Cipher algorithm types
* @QCEDEV_ALG_DES: DES
* @QCEDEV_ALG_3DES: 3DES
* @QCEDEV_ALG_AES: AES
*/
enum qcedev_cipher_alg_enum {
QCEDEV_ALG_DES = 0,
QCEDEV_ALG_3DES = 1,
QCEDEV_ALG_AES = 2,
QCEDEV_ALG_LAST
};
/**
*qcedev_cipher_mode_enum : AES mode
* @QCEDEV_AES_MODE_CBC: CBC
* @QCEDEV_AES_MODE_ECB: ECB
* @QCEDEV_AES_MODE_CTR: CTR
* @QCEDEV_AES_MODE_XTS: XTS
* @QCEDEV_AES_MODE_CCM: CCM
* @QCEDEV_DES_MODE_CBC: CBC
* @QCEDEV_DES_MODE_ECB: ECB
*/
enum qcedev_cipher_mode_enum {
QCEDEV_AES_MODE_CBC = 0,
QCEDEV_AES_MODE_ECB = 1,
QCEDEV_AES_MODE_CTR = 2,
QCEDEV_AES_MODE_XTS = 3,
QCEDEV_AES_MODE_CCM = 4,
QCEDEV_DES_MODE_CBC = 5,
QCEDEV_DES_MODE_ECB = 6,
QCEDEV_AES_DES_MODE_LAST
};
/**
*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
* @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits)
* @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit)
* @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits)
* @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit)
* @QCEDEV_ALG_AES_CMAC: Configurable MAC size
*/
enum qcedev_sha_alg_enum {
QCEDEV_ALG_SHA1 = 0,
QCEDEV_ALG_SHA256 = 1,
QCEDEV_ALG_SHA1_HMAC = 2,
QCEDEV_ALG_SHA256_HMAC = 3,
QCEDEV_ALG_AES_CMAC = 4,
QCEDEV_ALG_SHA_ALG_LAST
};
/**
* struct buf_info - Buffer information
* @offset: Offset from the base address of the buffer
* (Used when buffer is allocated using PMEM)
* @vaddr: Virtual buffer address pointer
* @len: Size of the buffer
*/
struct buf_info {
union {
__u32 offset;
__u8 *vaddr;
};
__u32 len;
};
/**
* struct qcedev_vbuf_info - Source and destination Buffer information
* @src: Array of buf_info for input/source
* @dst: Array of buf_info for output/destination
*/
struct qcedev_vbuf_info {
struct buf_info src[QCEDEV_MAX_BUFFERS];
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
 * struct qcedev_pmem_info - Stores PMEM buffer information
 * @fd_src: Handle to /dev/adsp_pmem used to allocate
 *	    memory for input/src buffer
 * @src:    Array of buf_info for input/source
 * @fd_dst: Handle to /dev/adsp_pmem used to allocate
 *	    memory for output/dst buffer
 * @dst:    Array of buf_info for output/destination
 *
 * NOTE(review): the original kernel-doc also described a @pmem_src_offset
 * member ("offset from input/src buffer allocated by PMEM") that does not
 * exist in this struct — presumably conveyed via buf_info.offset; confirm.
 */
struct qcedev_pmem_info {
	int fd_src;
	struct buf_info src[QCEDEV_MAX_BUFFERS];
	int fd_dst;
	struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
 * struct qcedev_cipher_op_req - Holds the ciphering request information
 * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
 *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
 * @pmem (IN):		Stores PMEM buffer information.
 *			Refer struct qcedev_pmem_info
 * @vbuf (IN/OUT):	Stores Source and destination Buffer information
 *			Refer to struct qcedev_vbuf_info
 * @entries (IN):	Number of buf_info entries in use in @pmem/@vbuf
 * @data_len (IN):	Total Length of input/src and output/dst in bytes
 * @in_place_op (IN):	Indicates whether the operation is inplace where
 *			source == destination
 *			When using PMEM allocated memory, must set this to 1
 * @enckey (IN):	128 bits of confidentiality key
 *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
 *			enckey[15] bit 7-0
 * @encklen (IN):	Length of the encryption key(set to 128 bits/16
 *			bytes in the driver)
 * @iv (IN/OUT):	Initialisation vector data
 *			This is updated by the driver, incremented by
 *			number of blocks encrypted/decrypted.
 * @ivlen (IN):		Length of the IV
 * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
 *			for AES-128 CTR mode only)
 * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
 * @mode (IN):		Mode use when using AES algorithm: ECB/CBC/CTR
 *			Applicable when using AES algorithm only
 * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
 *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
 *
 * If use_pmem is set to 0, the driver assumes that memory was not allocated
 * via PMEM, and kernel will need to allocate memory and copy data from user
 * space buffer (data_src/data_dst) and process accordingly and copy data back
 * to the user space buffer
 *
 * If use_pmem is set to 1, the driver assumes that memory was allocated via
 * PMEM.
 * The kernel driver will use the fd_src to determine the kernel virtual address
 * base that maps to the user space virtual address base for the buffer
 * allocated in user space.
 * The final input/src and output/dst buffer pointer will be determined
 * by adding the offsets to the kernel virtual addr.
 *
 * If use of hardware key is supported in the target, user can configure the
 * key parameters (encklen, enckey) to use the hardware key.
 * In order to use the hardware key, set encklen to 0 and set the enckey
 * data array to 0.
 */
struct qcedev_cipher_op_req {
	__u8 use_pmem;
	union {
		struct qcedev_pmem_info pmem;
		struct qcedev_vbuf_info vbuf;
	};
	__u32 entries;
	__u32 data_len;
	__u8 in_place_op;
	__u8 enckey[QCEDEV_MAX_KEY_SIZE];
	__u32 encklen;
	__u8 iv[QCEDEV_MAX_IV_SIZE];
	__u32 ivlen;
	__u32 byteoffset;
	enum qcedev_cipher_alg_enum alg;
	enum qcedev_cipher_mode_enum mode;
	enum qcedev_oper_enum op;
};
/**
 * struct qcedev_sha_op_req - Holds the hashing request information
 * @data (IN):		Array of pointers to the data to be hashed
 * @entries (IN):	Number of buf_info entries in the data array
 * @data_len (IN):	Length of data to be hashed
 * @digest (IN/OUT):	Returns the hashed data information
 * @diglen (OUT):	Size of the hashed/digest data
 * @authkey (IN):	Pointer to authentication key for HMAC
 * @authklen (IN):	Size of the authentication key
 * @alg (IN):		Secure Hash algorithm
 */
struct qcedev_sha_op_req {
	struct buf_info data[QCEDEV_MAX_BUFFERS];
	__u32 entries;
	__u32 data_len;
	__u8 digest[QCEDEV_MAX_SHA_DIGEST];
	__u32 diglen;
	__u8 *authkey;
	__u32 authklen;
	enum qcedev_sha_alg_enum alg;
};
/**
 * struct qfips_verify_t - Holds data for FIPS Integrity test
 * @kernel_size (IN):	Size of kernel Image
 * @kernel (IN):	pointer to buffer containing the kernel Image
 */
struct qfips_verify_t {
	unsigned int kernel_size;
	void *kernel;
};
/**
 * struct qcedev_map_buf_req - Holds the mapping request information
 * @fd (IN):		Array of fds.
 * @num_fds (IN):	Number of fds in fd[].
 * @fd_size (IN):	Array of sizes corresponding to each fd in fd[].
 * @fd_offset (IN):	Array of offset corresponding to each fd in fd[].
 * @buf_vaddr (OUT):	Array of mapped virtual address corresponding to
 *			each fd in fd[].
 */
struct qcedev_map_buf_req {
	__s32 fd[QCEDEV_MAX_BUFFERS];
	__u32 num_fds;
	__u32 fd_size[QCEDEV_MAX_BUFFERS];
	__u32 fd_offset[QCEDEV_MAX_BUFFERS];
	__u64 buf_vaddr[QCEDEV_MAX_BUFFERS];
};
/**
 * struct qcedev_unmap_buf_req - Holds the unmapping request information
 * @fd (IN):		Array of fds to unmap
 * @num_fds (IN):	Number of fds in fd[].
 */
struct qcedev_unmap_buf_req {
	__s32 fd[QCEDEV_MAX_BUFFERS];
	__u32 num_fds;
};
struct file;

/* ioctl interface exposed by the qcedev char device. */
#define QCEDEV_IOC_MAGIC	0x87

/* Symmetric cipher encrypt/decrypt (struct qcedev_cipher_op_req). */
#define QCEDEV_IOCTL_ENC_REQ		\
	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_DEC_REQ		\
	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
/* Multi-step hashing: init/update/final, plus one-shot digest. */
#define QCEDEV_IOCTL_SHA_INIT_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_GET_SHA_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
/* Exclusive crypto-engine lock/unlock (no payload). */
#define QCEDEV_IOCTL_LOCK_CE	\
	_IO(QCEDEV_IOC_MAGIC, 7)
#define QCEDEV_IOCTL_UNLOCK_CE	\
	_IO(QCEDEV_IOC_MAGIC, 8)
/* AES-CMAC computation reuses the SHA request layout. */
#define QCEDEV_IOCTL_GET_CMAC_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
/* Pre-map / unmap dma-buf fds for zero-copy operation. */
#define QCEDEV_IOCTL_MAP_BUF_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
#define QCEDEV_IOCTL_UNMAP_BUF_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
#endif /* _QCEDEV__H */

215
crypto-qti/linux/qcota.h Normal file
View File

@@ -0,0 +1,215 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 */
#ifndef _UAPI_QCOTA_H
#define _UAPI_QCOTA_H

#include <linux/types.h>
#include <linux/ioctl.h>

/* Maximum 5-bit radio bearer identifier value. */
#define QCE_OTA_MAX_BEARER	31
#define OTA_KEY_SIZE 16		/* 128 bits of keys. */

/* Ciphering direction for F8/F9 operations. */
enum qce_ota_dir_enum {
	QCE_OTA_DIR_UPLINK   = 0,
	QCE_OTA_DIR_DOWNLINK = 1,
	QCE_OTA_DIR_LAST	/* sentinel */
};

/* Over-the-air cipher/integrity algorithm selector. */
enum qce_ota_algo_enum {
	QCE_OTA_ALGO_KASUMI = 0,
	QCE_OTA_ALGO_SNOW3G = 1,
	QCE_OTA_ALGO_LAST	/* sentinel */
};
/**
 * struct qce_f8_req - qce f8 request
 * @data_in:	packets input data stream to be ciphered.
 *		If NULL, streaming mode operation.
 * @data_out:	ciphered packets output data.
 * @data_len:	length of data_in and data_out in bytes.
 * @count_c:	count-C, ciphering sequence number, 32 bit
 * @bearer:	5 bit of radio bearer identifier.
 * @ckey:	128 bits of confidentiality key,
 *		ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
 * @direction:	uplink or downlink.
 * @algorithm:	Kasumi, or Snow3G.
 *
 * If data_in is NULL, the engine will run in a special mode called
 * key stream mode. In this special mode, the engine will generate
 * key stream output for the number of bytes specified in the
 * data_len, based on the input parameters of direction, algorithm,
 * ckey, bearer, and count_c. The data_len is restricted to
 * the length of multiple of 16 bytes.  Application can then take the
 * output stream, do a exclusive or to the input data stream, and
 * generate the final cipher data stream.
 */
struct qce_f8_req {
	__u8 *data_in;
	__u8 *data_out;
	__u16 data_len;
	__u32 count_c;
	__u8 bearer;
	__u8 ckey[OTA_KEY_SIZE];
	enum qce_ota_dir_enum direction;
	enum qce_ota_algo_enum algorithm;
};
/**
 * struct qce_f8_multi_pkt_req - qce f8 multiple packet request
 *			Multiple packets with uniform size, and
 *			F8 ciphering parameters can be ciphered in a
 *			single request.
 *
 * @num_pkt:		number of packets.
 *
 * @cipher_start:	ciphering starts offset within a packet.
 *
 * @cipher_size:	number of bytes to be ciphered within a packet.
 *
 * @qce_f8_req:		description of the packet and F8 parameters.
 *			The following fields have special meaning for
 *			multiple packet operation,
 *
 *	@data_len:	data_len indicates the length of a packet.
 *
 *	@data_in:	packets are concatenated together in a byte
 *			stream started at data_in.
 *
 *	@data_out:	The returned ciphered output for multiple
 *			packets.
 *			Each packet ciphered output are concatenated
 *			together into a byte stream started at data_out.
 *			Note, each ciphered packet output area from
 *			offset 0 to cipher_start-1, and from offset
 *			cipher_size to data_len -1 are remained
 *			unaltered from packet input area.
 *	@count_c:	count-C of the first packet, 32 bit.
 *
 *
 *   In one request, multiple packets can be ciphered, and output to the
 *   data_out stream.
 *
 *   Packet data are laid out contiguously in sequence in data_in,
 *   and data_out area. Every packet is identical size.
 *   If the PDU is not byte aligned, set the data_len value of
 *   to the rounded up value of the packet size. Eg, PDU size of
 *   253 bits, set the packet size to 32 bytes. Next packet starts on
 *   the next byte boundary.
 *
 *   For each packet, data from offset 0 to cipher_start
 *   will be left unchanged and output to the data_out area.
 *   This area of the packet can be for the RLC header, which is not
 *   to be ciphered.
 *
 *   The ciphering of a packet starts from offset cipher_start, for
 *   cipher_size bytes of data. Data starting from
 *   offset cipher_start + cipher_size to the end of packet will be left
 *   unchanged and output to the dataOut area.
 *
 *   For each packet the input arguments of bearer, direction,
 *   ckey, algorithm have to be the same. count_c is the ciphering sequence
 *   number of the first packet. The 2nd packet's ciphering sequence
 *   number is assumed to be count_c + 1. The 3rd packet's ciphering sequence
 *   number is count_c + 2.....
 *
 */
struct qce_f8_multi_pkt_req {
	__u16 num_pkt;
	__u16 cipher_start;
	__u16 cipher_size;
	struct qce_f8_req qce_f8_req;
};
/**
 * struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request
 *			Multiple packets with variable size, and
 *			F8 ciphering parameters can be ciphered in a
 *			single request.
 *
 * @num_pkt:		number of packets.
 *
 * @cipher_iov[]:	array of iov of packets to be ciphered.
 *
 *
 * @qce_f8_req:		description of the packet and F8 parameters.
 *			The following fields have special meaning for
 *			multiple packet operation,
 *
 *	@data_len:	ignored.
 *
 *	@data_in:	ignored.
 *
 *	@data_out:	ignored.
 *
 *	@count_c:	count-C of the first packet, 32 bit.
 *
 *
 *   In one request, multiple packets can be ciphered.
 *
 *   The i-th packet are defined in cipher_iov[i-1].
 *   The ciphering of i-th packet starts from offset 0 of the PDU specified
 *   by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data.
 *   If the PDU is not byte aligned, set the cipher_iov[i-1].size value
 *   to the rounded up value of the packet size. Eg, PDU size of
 *   253 bits, set the packet size to 32 bytes.
 *
 *   Ciphering are done in place. That is, the ciphering
 *   input and output data are both in cipher_iov[i-1].addr for the i-th
 *   packet.
 *
 *   For each packet the input arguments of bearer, direction,
 *   ckey, algorithm have to be the same. count_c is the ciphering sequence
 *   number of the first packet. The 2nd packet's ciphering sequence
 *   number is assumed to be count_c + 1. The 3rd packet's ciphering sequence
 *   number is count_c + 2.....
 */
#define MAX_NUM_V_MULTI_PKT 20

/* One variable-size packet: user-space address and its byte length. */
struct cipher_iov {
	unsigned char  *addr;
	unsigned short  size;
};

struct qce_f8_variable_multi_pkt_req {
	unsigned short    num_pkt;
	struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
	struct qce_f8_req qce_f8_req;
};
/**
 * struct qce_f9_req - qce f9 request
 * @message:	message
 * @msize:	message size in bytes (include the last partial byte).
 * @last_bits:	valid bits in the last byte of message.
 * @mac_i:	32 bit message authentication code, to be returned.
 * @fresh:	random 32 bit number, one per user.
 * @count_i:	32 bit count-I integrity sequence number.
 * @direction:	uplink or downlink.
 * @ikey:	128 bits of integrity key,
 *		ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
 * @algorithm:	Kasumi, or Snow3G.
 */
struct qce_f9_req {
	__u8 *message;
	__u16 msize;
	__u8 last_bits;
	__u32 mac_i;
	__u32 fresh;
	__u32 count_i;
	enum qce_ota_dir_enum direction;
	__u8 ikey[OTA_KEY_SIZE];
	enum qce_ota_algo_enum algorithm;
};
/* ioctl interface exposed by the /dev/qcota char device. */
#define QCOTA_IOC_MAGIC	0x85

#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
				struct qce_f8_variable_multi_pkt_req)
#endif /* _UAPI_QCOTA_H */

View File

@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 */
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_

#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>

/* Per-context flag word layout: low byte selects key source... */
#define QCRYPTO_CTX_KEY_MASK		0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002

/* ...second byte selects the XTS data-unit size. */
#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100
#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200

/* Pin a request to a specific crypto-engine device instance. */
int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev);
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);

/* Set/clear the QCRYPTO_CTX_* flags on an in-flight request. */
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);
int qcrypto_cipher_clear_flag(struct skcipher_request *req,
		unsigned int flags);
int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);

/* Describes one hardware crypto engine instance for enumeration. */
struct crypto_engine_entry {
	u32 hw_instance;
	u32 ce_device;
	int shared;	/* non-zero if the engine is shared with other users */
};

int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
				struct crypto_engine_entry *arr);
int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
				unsigned int fde_pfe,
				unsigned int hw_inst);

/* Function table so callers can bind to these ops indirectly. */
struct qcrypto_func_set {
	int (*cipher_set)(struct skcipher_request *req,
			unsigned int fde_pfe,
			unsigned int hw_inst);
	int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
	int (*get_num_engines)(void);
	void (*get_engine_list)(size_t num_engines,
				struct crypto_engine_entry *arr);
};

/*
 * NOTE(review): guard was opened as _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ but this
 * closing comment says _DRIVERS_CRYPTO_MSM_QCRYPTO_H (comment only; harmless).
 */
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H */

994
crypto-qti/ota_crypto.c Normal file
View File

@@ -0,0 +1,994 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI Over the Air (OTA) Crypto driver
*
* Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/cache.h>
#include "linux/qcota.h"
#include "qce.h"
#include "qce_ota.h"
/* Which OTA operation an async request carries. */
enum qce_ota_oper_enum {
	QCE_OTA_F8_OPER = 0,		/* single-packet F8 cipher */
	QCE_OTA_MPKT_F8_OPER = 1,	/* uniform-size multi-packet F8 */
	QCE_OTA_F9_OPER = 2,		/* F9 integrity (MAC) */
	QCE_OTA_VAR_MPKT_F8_OPER = 3,	/* variable-size multi-packet F8 */
	QCE_OTA_OPER_LAST
};

struct ota_dev_control;

/* One queued/in-flight request; lives on the submitter's stack. */
struct ota_async_req {
	struct list_head rlist;		/* link on ota_dev_control.ready_commands */
	struct completion complete;	/* signalled when the request finishes */
	int err;			/* final status, set by callbacks */
	enum qce_ota_oper_enum op;
	union {
		struct qce_f9_req f9_req;
		struct qce_f8_req f8_req;
		struct qce_f8_multi_pkt_req f8_mp_req;
		struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
	} req;
	/* current sub-packet index for QCE_OTA_VAR_MPKT_F8_OPER */
	unsigned int steps;
	struct ota_qce_dev  *pqce;	/* engine processing this request */
};
/*
 * Register ourselves as a char device /dev/qcota0 to be able to access the ota
 * from userspace.
 */
#define QCOTA_DEV	"qcota0"

/* Singleton driver state shared by all probed engines. */
struct ota_dev_control {

	/* char device */
	struct cdev cdev;
	int minor;
	/* requests waiting for a free engine; guarded by @lock */
	struct list_head ready_commands;
	unsigned int magic;		/* OTA_MAGIC — handle sanity check */
	struct list_head qce_dev;	/* list of probed ota_qce_dev */
	spinlock_t lock;		/* protects ready_commands and qce_dev */
	struct mutex register_lock;	/* serialises probe/remove bookkeeping */
	bool registered;		/* char device registered? */
	uint32_t total_units;		/* number of engines probed */
};

/* Per-engine (per platform device) state. */
struct ota_qce_dev {
	struct list_head qlist;		/* link on ota_dev_control.qce_dev */
	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	/* request currently on the hardware; NULL when idle */
	struct ota_async_req *active_command;
	struct tasklet_struct done_tasklet;	/* completion bottom half */
	struct ota_dev_control *podev;
	uint32_t unit;			/* engine index, for stats */
	u64 total_req;
	u64 err_req;
};
#define OTA_MAGIC 0x4f544143	/* "OTAC" — validates file->private_data */

static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);

static const struct file_operations qcota_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcota_ioctl,
	.open = qcota_open,
	.release = qcota_release,
};

/* Singleton instance; further fields are set up in qcota_probe(). */
static struct ota_dev_control qcota_dev = {
	.magic = OTA_MAGIC,
};

static dev_t qcota_device_no;
static struct class *driver_class;
static struct device *class_dev;
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

/* Aggregate request/outcome counters exposed through debugfs. */
struct qcota_stat {
	u64 f8_req;
	u64 f8_mp_req;
	u64 f8_v_mp_req;
	u64 f9_req;
	u64 f8_op_success;
	u64 f8_op_fail;
	u64 f8_mp_op_success;
	u64 f8_mp_op_fail;
	u64 f8_v_mp_op_success;
	u64 f8_v_mp_op_fail;
	u64 f9_op_success;
	u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;

static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];	/* debugfs read scratch */
static int _debug_qcota;
/* Return the driver's singleton device-control block. */
static struct ota_dev_control *qcota_control(void)
{
	struct ota_dev_control *podev = &qcota_dev;

	return podev;
}
/* open() handler: stash the singleton control block in the file handle. */
static int qcota_open(struct inode *inode, struct file *file)
{
	struct ota_dev_control *podev = qcota_control();

	if (!podev) {
		pr_err("%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	file->private_data = podev;
	return 0;
}
/* release() handler: warn on a corrupted handle, then drop it. */
static int qcota_release(struct inode *inode, struct file *file)
{
	struct ota_dev_control *podev = file->private_data;

	if (podev && podev->magic != OTA_MAGIC)
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);

	file->private_data = NULL;
	return 0;
}
/*
 * Advance a variable-size multi-packet F8 request to its next sub-packet.
 * Returns true if another sub-packet should be issued, false when the
 * request errored out or all packets are done.
 *
 * Packets were packed into one kernel buffer by qcota_ioctl(), each
 * starting on an L1_CACHE_BYTES boundary, so the next packet's address is
 * derived by stepping past the previous packet and re-aligning.
 */
static bool _next_v_mp_req(struct ota_async_req *areq)
{
	unsigned char *p;

	if (areq->err)
		return false;
	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
		return false;

	/* step to the next cache-line-aligned packet in the flat buffer */
	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);

	/* in-place ciphering: input and output share the buffer */
	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
	areq->req.f8_v_mp_req.qce_f8_req.data_len =
		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;

	/* each successive packet uses the next count-C sequence number */
	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
	return true;
}
/*
 * Tasklet body run after a hardware completion callback.
 *
 * For variable multi-packet F8 it first tries to launch the next
 * sub-packet; otherwise (or once done) it pulls the next queued request
 * off ready_commands and starts it, finally completing the finished
 * request so its submitter can return.  The lock is dropped around
 * start_req() and re-taken on failure, so the lock/unlock pairing below
 * is deliberately asymmetric — do not "simplify" it.
 */
static void req_done(unsigned long data)
{
	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
	struct ota_dev_control *podev = pqce->podev;
	struct ota_async_req *areq;
	unsigned long flags;
	struct ota_async_req *new_req = NULL;
	int ret = 0;
	bool schedule = true;

	spin_lock_irqsave(&podev->lock, flags);
	areq = pqce->active_command;
	if (unlikely(areq == NULL))
		pr_err("ota_crypto: %s, no active request\n", __func__);
	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
		if (_next_v_mp_req(areq)) {
			/* execute next subcommand */
			spin_unlock_irqrestore(&podev->lock, flags);
			ret = start_req(pqce, areq);
			if (unlikely(ret)) {
				areq->err = ret;
				schedule = true;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				/* sub-packet launched; don't complete yet */
				areq = NULL;
				schedule = false;
			}
		} else {
			/* done with this variable mp req */
			schedule = true;
		}
	}
	/* hand the engine to the next queued request, if any */
	while (schedule) {
		if (!list_empty(&podev->ready_commands)) {
			new_req = container_of(podev->ready_commands.next,
						struct ota_async_req, rlist);
			list_del(&new_req->rlist);
			pqce->active_command = new_req;
			spin_unlock_irqrestore(&podev->lock, flags);

			if (new_req) {
				new_req->err = 0;
				/* start a new request */
				ret = start_req(pqce, new_req);
			}
			if (unlikely(new_req && ret)) {
				/* failed to start: fail it and try the next */
				new_req->err = ret;
				complete(&new_req->complete);
				ret = 0;
				new_req = NULL;
				spin_lock_irqsave(&podev->lock, flags);
			} else {
				schedule = false;
			}
		} else {
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
			schedule = false;
		}
	}
	if (areq)
		complete(&areq->complete);
}
/*
 * qce completion callback for F9 (integrity) requests: record the MAC and
 * status, then defer the rest to the done tasklet.
 *
 * NOTE(review): icv is dereferenced before ret is checked — this assumes
 * the qce layer always supplies a valid icv buffer, even on error; confirm
 * against qce_f9_req()'s callback contract.
 */
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
	int ret)
{
	struct ota_async_req *areq = (struct ota_async_req *) cookie;
	struct ota_qce_dev *pqce;

	pqce = areq->pqce;
	areq->req.f9_req.mac_i = *((uint32_t *)icv);

	if (ret) {
		pqce->err_req++;
		areq->err = -ENXIO;
	} else
		areq->err = 0;

	tasklet_schedule(&pqce->done_tasklet);
}
/*
 * qce completion callback for F8 (cipher) requests: record the status and
 * kick the done tasklet to finish/reschedule the engine.
 */
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
	int ret)
{
	struct ota_async_req *areq = (struct ota_async_req *) cookie;
	struct ota_qce_dev *pqce = areq->pqce;

	if (!ret) {
		areq->err = 0;
	} else {
		pqce->err_req++;
		areq->err = -ENXIO;
	}

	tasklet_schedule(&pqce->done_tasklet);
}
/*
 * Issue @areq to engine @pqce via the matching qce_* entry point.
 * Returns 0 on successful submission (completion arrives via callback),
 * or a negative errno; statistics are updated either way.
 */
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
{
	int rc;

	/* command should be on the podev->active_command */
	areq->pqce = pqce;

	switch (areq->op) {
	case QCE_OTA_F8_OPER:
		rc = qce_f8_req(pqce->qce, &areq->req.f8_req, areq, f8_cb);
		break;
	case QCE_OTA_MPKT_F8_OPER:
		rc = qce_f8_multi_pkt_req(pqce->qce, &areq->req.f8_mp_req,
						areq, f8_cb);
		break;
	case QCE_OTA_F9_OPER:
		rc = qce_f9_req(pqce->qce, &areq->req.f9_req, areq, f9_cb);
		break;
	case QCE_OTA_VAR_MPKT_F8_OPER:
		rc = qce_f8_req(pqce->qce, &areq->req.f8_v_mp_req.qce_f8_req,
						areq, f8_cb);
		break;
	default:
		rc = -ENOTSUPP;
		break;
	}

	areq->err = rc;
	pqce->total_req++;
	if (rc)
		pqce->err_req++;
	return rc;
}
/*
 * Pick an idle engine, or NULL if all are busy or none are registered.
 * Caller must hold podev->lock.
 */
static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
{
	/* do this function with spinlock set */
	struct ota_qce_dev *entry;

	if (unlikely(list_empty(&podev->qce_dev))) {
		pr_err("%s: no valid qce to schedule\n", __func__);
		return NULL;
	}

	list_for_each_entry(entry, &podev->qce_dev, qlist) {
		if (!entry->active_command)
			return entry;
	}
	return NULL;
}
/*
 * Submit @areq: hand it to an idle engine immediately, or queue it on
 * ready_commands for req_done() to pick up.  Blocks until the request
 * completes (unless submission itself failed), then updates the
 * per-operation success/fail statistics.  Returns the request's final
 * error status.
 */
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
	unsigned long flags;
	int ret = 0;
	struct qcota_stat *pstat;
	struct ota_qce_dev *pqce;

	areq->err = 0;
	spin_lock_irqsave(&podev->lock, flags);
	pqce = schedule_qce(podev);
	if (pqce) {
		pqce->active_command = areq;
		spin_unlock_irqrestore(&podev->lock, flags);

		ret = start_req(pqce, areq);
		if (ret != 0) {
			/* submission failed: release the engine */
			spin_lock_irqsave(&podev->lock, flags);
			pqce->active_command = NULL;
			spin_unlock_irqrestore(&podev->lock, flags);
		}

	} else {
		/* all engines busy (or none): queue for req_done() */
		list_add_tail(&areq->rlist, &podev->ready_commands);
		spin_unlock_irqrestore(&podev->lock, flags);
	}

	if (ret == 0)
		wait_for_completion(&areq->complete);

	pstat = &_qcota_stat;
	switch (areq->op) {
	case QCE_OTA_F8_OPER:
		if (areq->err)
			pstat->f8_op_fail++;
		else
			pstat->f8_op_success++;
		break;
	case QCE_OTA_MPKT_F8_OPER:
		if (areq->err)
			pstat->f8_mp_op_fail++;
		else
			pstat->f8_mp_op_success++;
		break;
	case QCE_OTA_F9_OPER:
		if (areq->err)
			pstat->f9_op_fail++;
		else
			pstat->f9_op_success++;
		break;
	case QCE_OTA_VAR_MPKT_F8_OPER:
	default:
		if (areq->err)
			pstat->f8_v_mp_op_fail++;
		else
			pstat->f8_v_mp_op_success++;
		break;
	}
	return areq->err;
}
/*
 * ioctl entry point: validate the user request, bounce the user buffers
 * into kernel memory, run the F8/F9 operation synchronously via
 * submit_req(), and copy results back.
 *
 * The three-argument access_ok(VERIFY_*, ...) form dates this code to
 * pre-5.0 kernels; newer kernels take (addr, size) only.
 */
static long qcota_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct ota_dev_control *podev;
	uint8_t *user_src;
	uint8_t *user_dst;
	uint8_t *k_buf = NULL;
	struct ota_async_req areq;	/* request lives on this stack frame */
	uint32_t total, temp;
	struct qcota_stat *pstat;
	int i;
	uint8_t *p = NULL;

	podev = file->private_data;
	if (podev == NULL || podev->magic != OTA_MAGIC) {
		pr_err("%s: invalid handle %pK\n",
			__func__, podev);
		return -ENOENT;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
		return -ENOTTY;

	init_completion(&areq.complete);

	pstat = &_qcota_stat;

	switch (cmd) {
	case QCOTA_F9_REQ:
		/* F9 integrity: copy in message, compute MAC, copy req back */
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
			sizeof(struct qce_f9_req)))
			return -EFAULT;
		if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
			sizeof(struct qce_f9_req)))
			return -EFAULT;

		user_src = areq.req.f9_req.message;
		if (!access_ok(VERIFY_READ, (void __user *)user_src,
			areq.req.f9_req.msize))
			return -EFAULT;

		if (areq.req.f9_req.msize == 0)
			return 0;

		k_buf = memdup_user((const void __user *)user_src,
			areq.req.f9_req.msize);
		if (IS_ERR(k_buf))
			return -EFAULT;

		areq.req.f9_req.message = k_buf;
		areq.op = QCE_OTA_F9_OPER;

		pstat->f9_req++;
		err = submit_req(&areq, podev);

		/* restore the user pointer before copying the struct back */
		areq.req.f9_req.message = user_src;
		if (err == 0 && copy_to_user((void __user *)arg,
			&areq.req.f9_req, sizeof(struct qce_f9_req))) {
			err = -EFAULT;
		}
		kfree(k_buf);
		break;
	case QCOTA_F8_REQ:
		/* single-packet F8: NULL data_in selects key-stream mode */
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
			sizeof(struct qce_f8_req)))
			return -EFAULT;
		if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
			sizeof(struct qce_f8_req)))
			return -EFAULT;
		total = areq.req.f8_req.data_len;
		user_src = areq.req.f8_req.data_in;
		if (user_src != NULL) {
			if (!access_ok(VERIFY_READ, (void __user *)
				user_src, total))
				return -EFAULT;
		}

		user_dst = areq.req.f8_req.data_out;
		if (!access_ok(VERIFY_WRITE, (void __user *)
			user_dst, total))
			return -EFAULT;

		if (!total)
			return 0;
		k_buf = kmalloc(total, GFP_KERNEL);
		if (k_buf == NULL)
			return -ENOMEM;

		/* k_buf returned from kmalloc should be cache line aligned */
		if (user_src && copy_from_user(k_buf,
				(void __user *)user_src, total)) {
			kfree(k_buf);
			return -EFAULT;
		}

		if (user_src)
			areq.req.f8_req.data_in = k_buf;
		else
			areq.req.f8_req.data_in = NULL;
		areq.req.f8_req.data_out = k_buf;

		areq.op = QCE_OTA_F8_OPER;

		pstat->f8_req++;
		err = submit_req(&areq, podev);

		if (err == 0 && copy_to_user(user_dst, k_buf, total))
			err = -EFAULT;
		kfree(k_buf);
		break;
	case QCOTA_F8_MPKT_REQ:
		/* uniform-size multi-packet F8 over one flat buffer */
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
			sizeof(struct qce_f8_multi_pkt_req)))
			return -EFAULT;
		if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
			sizeof(struct qce_f8_multi_pkt_req)))
			return -EFAULT;
		/* ciphered region must fit inside each packet */
		temp = areq.req.f8_mp_req.qce_f8_req.data_len;
		if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
				areq.req.f8_mp_req.cipher_size)
			return -EINVAL;
		total = (uint32_t) areq.req.f8_mp_req.num_pkt *
				areq.req.f8_mp_req.qce_f8_req.data_len;

		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
		if (!access_ok(VERIFY_READ, (void __user *)
				user_src, total))
			return -EFAULT;

		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
		if (!access_ok(VERIFY_WRITE, (void __user *)
				user_dst, total))
			return -EFAULT;

		if (!total)
			return 0;
		/* k_buf should be cache line aligned */
		k_buf = memdup_user((const void __user *)user_src, total);
		if (IS_ERR(k_buf))
			return -EFAULT;

		/* in-place ciphering in the kernel bounce buffer */
		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;

		areq.op = QCE_OTA_MPKT_F8_OPER;

		pstat->f8_mp_req++;
		err = submit_req(&areq, podev);

		if (err == 0 && copy_to_user(user_dst, k_buf, total))
			err = -EFAULT;
		kfree(k_buf);
		break;
	case QCOTA_F8_V_MPKT_REQ:
		/* variable-size multi-packet F8: pack iovecs into one buffer,
		 * each packet aligned to L1_CACHE_BYTES (see _next_v_mp_req)
		 */
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qce_f8_variable_multi_pkt_req)))
			return -EFAULT;
		if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
				sizeof(struct qce_f8_variable_multi_pkt_req)))
			return -EFAULT;

		if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
			return -EINVAL;

		for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
			if (!access_ok(VERIFY_WRITE, (void __user *)
				areq.req.f8_v_mp_req.cipher_iov[i].addr,
				areq.req.f8_v_mp_req.cipher_iov[i].size))
				return -EFAULT;
			total += areq.req.f8_v_mp_req.cipher_iov[i].size;
			total = ALIGN(total, L1_CACHE_BYTES);
		}

		if (!total)
			return 0;
		k_buf = kmalloc(total, GFP_KERNEL);
		if (k_buf == NULL)
			return -ENOMEM;

		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
			user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
			if (copy_from_user(p, (void __user *)user_src,
				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
				kfree(k_buf);
				return -EFAULT;
			}
			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
			p = (uint8_t *) ALIGN(((uintptr_t)p),
							L1_CACHE_BYTES);
		}

		areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
		areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
		areq.req.f8_v_mp_req.qce_f8_req.data_len =
			areq.req.f8_v_mp_req.cipher_iov[0].size;
		areq.steps = 0;
		areq.op = QCE_OTA_VAR_MPKT_F8_OPER;

		pstat->f8_v_mp_req++;
		err = submit_req(&areq, podev);

		if (err != 0) {
			kfree(k_buf);
			return err;
		}

		/* scatter the ciphered packets back to the user iovecs */
		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
			user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
			if (copy_to_user(user_dst, p,
				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
				kfree(k_buf);
				return -EFAULT;
			}
			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
			p = (uint8_t *) ALIGN(((uintptr_t)p),
							L1_CACHE_BYTES);
		}
		kfree(k_buf);
		break;
	default:
		return -ENOTTY;
	}

	return err;
}
/*
 * Platform-device probe: allocate per-engine state, register the
 * /dev/qcota0 char device (first engine only — subsequent probes reuse
 * it), open the qce engine, verify OTA capability, and add the engine to
 * the scheduler list.
 *
 * Fix: the original leaked @pqce on the alloc_chrdev_region() failure
 * path — it returned directly, while every later failure path falls
 * through the label chain ending in kfree(pqce).
 */
static int qcota_probe(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct ota_dev_control *podev;
	struct ce_hw_support ce_support;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	podev = &qcota_dev;
	pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
	if (!pqce)
		return -ENOMEM;

	rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		kfree(pqce);	/* was leaked on this early-return path */
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QCOTA_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
			QCOTA_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&podev->cdev, &qcota_fops);
	podev->cdev.owner = THIS_MODULE;

	rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	podev->minor = 0;

	pqce->podev = podev;
	pqce->active_command = NULL;
	tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);

	/* open qce */
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		pr_err("%s: device %s, can not open qce\n",
			__func__, pdev->name);
		goto exit_del_cdev;
	}
	if (qce_hw_support(handle, &ce_support) < 0 ||
			!ce_support.ota) {
		pr_err("%s: device %s, qce does not support ota capability\n",
			__func__, pdev->name);
		rc = -ENODEV;
		goto err;
	}

	pqce->qce = handle;
	pqce->pdev = pdev;
	pqce->total_req = 0;
	pqce->err_req = 0;
	platform_set_drvdata(pdev, pqce);

	mutex_lock(&podev->register_lock);
	rc = 0;
	/* assign this engine the next unit number; register once */
	if (!podev->registered) {
		if (rc == 0) {
			pqce->unit = podev->total_units;
			podev->total_units++;
			podev->registered = true;
		}
	} else {
		pqce->unit = podev->total_units;
		podev->total_units++;
	}
	mutex_unlock(&podev->register_lock);
	if (rc) {
		pr_err("ion: failed to register misc device.\n");
		goto err;
	}

	/* make the engine schedulable */
	spin_lock_irqsave(&podev->lock, flags);
	list_add_tail(&pqce->qlist, &podev->qce_dev);
	spin_unlock_irqrestore(&podev->lock, flags);

	return 0;
err:
	if (handle)
		qce_close(handle);
	platform_set_drvdata(pdev, NULL);
	tasklet_kill(&pqce->done_tasklet);

/* the labels below deliberately fall through, unwinding in reverse order */
exit_del_cdev:
	cdev_del(&podev->cdev);
exit_destroy_device:
	device_destroy(driver_class, qcota_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qcota_device_no, 1);

	kfree(pqce);
	return rc;
}
/*
 * Platform-device remove: close the qce engine, unlink it from the
 * scheduler list, and tear down the char device when the last engine
 * goes away.
 */
static int qcota_remove(struct platform_device *pdev)
{
	struct ota_dev_control *podev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	pqce = platform_get_drvdata(pdev);
	if (!pqce)
		return 0;
	if (pqce->qce)
		qce_close(pqce->qce);

	podev = pqce->podev;
	if (!podev)
		goto ret;

	spin_lock_irqsave(&podev->lock, flags);
	list_del(&pqce->qlist);
	spin_unlock_irqrestore(&podev->lock, flags);

	mutex_lock(&podev->register_lock);
	/* last engine out unregisters the char device */
	if (--podev->total_units == 0) {
		cdev_del(&podev->cdev);
		device_destroy(driver_class, qcota_device_no);
		class_destroy(driver_class);
		unregister_chrdev_region(qcota_device_no, 1);
		podev->registered = false;
	}
	mutex_unlock(&podev->register_lock);
ret:

	tasklet_kill(&pqce->done_tasklet);
	kfree(pqce);
	return 0;
}
/* Device-tree match table: binds to "qcom,qcota" nodes. */
static const struct of_device_id qcota_match[] = {
	{	.compatible = "qcom,qcota",
	},
	{}
};

static struct platform_driver qcota_plat_driver = {
	.probe = qcota_probe,
	.remove = qcota_remove,
	.driver = {
		.name = "qcota",
		.of_match_table = qcota_match,
	},
};
/*
 * _disp_stats() - format driver statistics into _debug_read_buf.
 *
 * Renders the global F8/F9 counters from _qcota_stat followed by the
 * per-engine request counters, bounded by DEBUG_MAX_RW_BUF.  Returns the
 * number of characters written.  The text produced here is read back by
 * userspace via the debugfs "stats-0" file, so its layout should be
 * treated as a stable interface.
 */
static int _disp_stats(void)
{
	struct qcota_stat *pstat;
	int len = 0;
	struct ota_dev_control *podev = &qcota_dev;
	unsigned long flags;
	struct ota_qce_dev *p;

	pstat = &_qcota_stat;
	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQTI OTA crypto accelerator Statistics:\n");
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 request                      : %llu\n",
			pstat->f8_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation success            : %llu\n",
			pstat->f8_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 operation fail               : %llu\n",
			pstat->f8_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP request                   : %llu\n",
			pstat->f8_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation success         : %llu\n",
			pstat->f8_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 MP operation fail            : %llu\n",
			pstat->f8_mp_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP request          : %llu\n",
			pstat->f8_v_mp_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation success: %llu\n",
			pstat->f8_v_mp_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F8 Variable MP operation fail   : %llu\n",
			pstat->f8_v_mp_op_fail);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 request                      : %llu\n",
			pstat->f9_req);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation success            : %llu\n",
			pstat->f9_op_success);
	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   F9 operation fail               : %llu\n",
			pstat->f9_op_fail);

	/* Per-engine counters are protected by the same lock that guards
	 * the qce_dev list membership.
	 */
	spin_lock_irqsave(&podev->lock, flags);
	list_for_each_entry(p, &podev->qce_dev, qlist) {
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req                 : %llu\n",
			p->unit,
			p->total_req
		);
		len += scnprintf(
			_debug_read_buf + len,
			DEBUG_MAX_RW_BUF - len - 1,
			"   Engine %4d Req Error           : %llu\n",
			p->unit,
			p->err_req
		);
	}
	spin_unlock_irqrestore(&podev->lock, flags);
	return len;
}
/*
 * _debug_stats_read() - debugfs read handler for the stats file.
 *
 * Regenerates the statistics text and copies it to userspace.  A buffer
 * smaller than the rendered text yields -EINVAL (no partial first read).
 */
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	int len = _disp_stats();

	if (len > count)
		return -EINVAL;

	return simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);
}
/*
 * _debug_stats_write() - debugfs write handler; any write resets stats.
 *
 * Zeroes the global counters and every engine's per-engine counters,
 * then claims the whole write so userspace sees success.
 */
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ota_dev_control *podev = &qcota_dev;
	struct ota_qce_dev *pqce;
	unsigned long flags;

	memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));

	/* Per-engine counters live on the lock-protected qce_dev list. */
	spin_lock_irqsave(&podev->lock, flags);
	list_for_each_entry(pqce, &podev->qce_dev, qlist) {
		pqce->total_req = 0;
		pqce->err_req = 0;
	}
	spin_unlock_irqrestore(&podev->lock, flags);

	return count;
}
/* File operations for the debugfs stats node (read dumps, write resets). */
static const struct file_operations _debug_stats_ops = {
	.open = simple_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
static int _qcota_debug_init(void)
{
int rc;
char name[DEBUG_MAX_FNAME];
struct dentry *dent;
_debug_dent = debugfs_create_dir("qcota", NULL);
if (IS_ERR(_debug_dent)) {
pr_err("qcota debugfs_create_dir fail, error %ld\n",
PTR_ERR(_debug_dent));
return PTR_ERR(_debug_dent);
}
snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
_debug_qcota = 0;
dent = debugfs_create_file(name, 0644, _debug_dent,
&_debug_qcota, &_debug_stats_ops);
if (dent == NULL) {
pr_err("qcota debugfs_create_file fail, error %ld\n",
PTR_ERR(dent));
rc = PTR_ERR(dent);
goto err;
}
return 0;
err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
/*
 * qcota_init() - module entry point.
 *
 * Sets up debugfs, initializes the global device-control state, and
 * registers the platform driver.
 *
 * Fix: the original leaked the debugfs directory created by
 * _qcota_debug_init() when platform_driver_register() failed; the
 * failure path now removes it before propagating the error.
 */
static int __init qcota_init(void)
{
	int rc;
	struct ota_dev_control *podev;

	rc = _qcota_debug_init();
	if (rc)
		return rc;

	podev = &qcota_dev;
	INIT_LIST_HEAD(&podev->ready_commands);
	INIT_LIST_HEAD(&podev->qce_dev);
	spin_lock_init(&podev->lock);
	mutex_init(&podev->register_lock);
	podev->registered = false;
	podev->total_units = 0;

	rc = platform_driver_register(&qcota_plat_driver);
	if (rc)
		debugfs_remove_recursive(_debug_dent);
	return rc;
}
/*
 * qcota_exit() - module exit point: remove debugfs tree and unregister
 * the platform driver (which runs qcota_remove for each bound device).
 */
static void __exit qcota_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcota_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");

module_init(qcota_init);
module_exit(qcota_exit);

196
crypto-qti/qce.h Normal file
View File

@@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver API
*
* Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_H
#define __CRYPTO_MSM_QCE_H
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <crypto/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
/* SHA digest size in bytes */
#define SHA256_DIGESTSIZE 32
#define SHA1_DIGESTSIZE 20
#define AES_CE_BLOCK_SIZE 16
/* key size in bytes */
#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */
#define SHA_HMAC_KEY_SIZE 64
#define DES_KEY_SIZE 8
#define TRIPLE_DES_KEY_SIZE 24
#define AES128_KEY_SIZE 16
#define AES192_KEY_SIZE 24
#define AES256_KEY_SIZE 32
#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE
/* iv length in bytes */
#define AES_IV_LENGTH 16
#define DES_IV_LENGTH 8
#define MAX_IV_LENGTH AES_IV_LENGTH
/* Maximum number of bytes per transfer */
#define QCE_MAX_OPER_DATA 0xFF00
/* Maximum Nonce bytes */
#define MAX_NONCE 16
/* Crypto clock control flags */
#define QCE_CLK_ENABLE_FIRST 1
#define QCE_BW_REQUEST_FIRST 2
#define QCE_CLK_DISABLE_FIRST 3
#define QCE_BW_REQUEST_RESET_FIRST 4
/* interconnect average and peak bw for crypto device */
#define CRYPTO_AVG_BW 393600
#define CRYPTO_PEAK_BW 393600
/*
 * Completion callback type: invoked by the CE driver when a request
 * finishes, passing back the caller's cookie (areq), the computed
 * ICV/IV, and the result code.
 */
typedef void (*qce_comp_func_ptr_t)(void *areq,
		unsigned char *icv, unsigned char *iv, int ret);

/* Cipher algorithms supported */
enum qce_cipher_alg_enum {
	CIPHER_ALG_DES = 0,
	CIPHER_ALG_3DES = 1,
	CIPHER_ALG_AES = 2,
	CIPHER_ALG_LAST
};

/* Hash and hmac algorithms supported */
enum qce_hash_alg_enum {
	QCE_HASH_SHA1 = 0,
	QCE_HASH_SHA256 = 1,
	QCE_HASH_SHA1_HMAC = 2,
	QCE_HASH_SHA256_HMAC = 3,
	QCE_HASH_AES_CMAC = 4,
	QCE_HASH_LAST
};

/* Cipher encryption/decryption operations */
enum qce_cipher_dir_enum {
	QCE_ENCRYPT = 0,
	QCE_DECRYPT = 1,
	QCE_CIPHER_DIR_LAST
};

/* Cipher algorithms modes */
enum qce_cipher_mode_enum {
	QCE_MODE_CBC = 0,
	QCE_MODE_ECB = 1,
	QCE_MODE_CTR = 2,
	QCE_MODE_XTS = 3,
	QCE_MODE_CCM = 4,
	QCE_CIPHER_MODE_LAST
};

/* Cipher operation type */
enum qce_req_op_enum {
	QCE_REQ_ABLK_CIPHER = 0,
	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
	QCE_REQ_AEAD = 2,
	QCE_REQ_LAST
};
/*
 * Algorithms/features supported in CE HW engine.
 * Filled in by qce_hw_support(); flags select which algorithms are
 * offloaded to hardware vs. routed to software implementations.
 */
struct ce_hw_support {
	bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/
	bool sha1_hmac; /* supports max HMAC key of 64 bytes*/
	bool sha256_hmac; /* supports max HMAC key of 64 bytes*/
	bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/
	bool cmac;
	bool aes_key_192;
	bool aes_xts;
	bool aes_ccm;
	bool ota;
	bool aligned_only;
	bool bam;
	bool is_shared;
	bool hw_key;
	/* use_sw_* flags force the corresponding algorithm to software. */
	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
	bool clk_mgmt_sus_res;
	bool req_bw_before_clk;
	unsigned int ce_device;
	unsigned int ce_hw_instance;
	unsigned int max_request;
};
/* Sha operation parameters (one hash/hmac request to the engine). */
struct qce_sha_req {
	qce_comp_func_ptr_t qce_cb;	/* call back */
	enum qce_hash_alg_enum alg;	/* sha algorithm */
	unsigned char *digest;		/* sha digest */
	struct scatterlist *src;	/* pointer to scatter list entry */
	uint32_t  auth_data[4];		/* byte count */
	unsigned char *authkey;		/* auth key */
	unsigned int  authklen;		/* auth key length */
	bool first_blk;			/* first block indicator */
	bool last_blk;			/* last block indicator */
	unsigned int size;		/* data length in bytes */
	void *areq;			/* caller cookie passed back to qce_cb */
	unsigned int  flags;
};
/* Cipher/AEAD operation parameters (one request to the engine). */
struct qce_req {
	enum qce_req_op_enum op;	/* operation type */
	qce_comp_func_ptr_t qce_cb;	/* call back */
	void *areq;			/* caller cookie passed back to qce_cb */
	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
	enum qce_cipher_mode_enum mode;	/* algorithm mode */
	enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
	unsigned char *authkey;		/* authentication key  */
	unsigned int authklen;		/* authentication key length */
	unsigned int authsize;		/* authentication size (NOTE(review):
					 * original comment duplicated
					 * authklen's; presumably ICV size)
					 */
	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
	unsigned char *assoc;		/* Ptr to formatted associated data */
	unsigned int assoclen;		/* Formatted associated data length  */
	struct scatterlist *asg;	/* Formatted associated data sg  */
	unsigned char *enckey;		/* cipher key  */
	unsigned int encklen;		/* cipher key length */
	unsigned char *iv;		/* initialization vector */
	unsigned int ivsize;		/* initialization vector size*/
	unsigned int cryptlen;		/* data length */
	unsigned int use_pmem;		/* is source of data PMEM allocated? */
	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
	unsigned int flags;
};
/* Power-management hooks exported by the QCE driver. */
struct qce_pm_table {
	int (*suspend)(void *handle);
	int (*resume)(void *handle);
};

extern struct qce_pm_table qce_pm_table;

/*
 * QCE driver public API.  qce_open() returns an opaque handle (or sets
 * *rc on failure) that all other calls take as their first argument;
 * qce_close() releases it.
 */
void *qce_open(struct platform_device *pdev, int *rc);
int qce_close(void *handle);
int qce_aead_req(void *handle, struct qce_req *req);
int qce_ablk_cipher_req(void *handle, struct qce_req *req);
int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
int qce_enable_clk(void *handle);
int qce_disable_clk(void *handle);
void qce_get_driver_stats(void *handle);
void qce_clear_driver_stats(void *handle);
void qce_dump_req(void *handle);
#endif /* __CRYPTO_MSM_QCE_H */

6198
crypto-qti/qce50.c Normal file

File diff suppressed because it is too large Load Diff

239
crypto-qti/qce50.h Normal file
View File

@@ -0,0 +1,239 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
#define _DRIVERS_CRYPTO_MSM_QCE50_H_
#include "linux/msm-sps.h"
/* MAX Data xfer block size between BAM and CE */
#define MAX_CE_BAM_BURST_SIZE   0x40
#define QCEBAM_BURST_SIZE       MAX_CE_BAM_BURST_SIZE

/*
 * Translate between virtual and physical addresses within the coherent
 * region.  NOTE: both macros assume a variable named `pce_dev` with
 * coh_vmem/coh_pmem members is in scope at the expansion site.
 */
#define GET_VIRT_ADDR(x)  \
		((uintptr_t)pce_dev->coh_vmem +			\
		((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
#define GET_PHYS_ADDR(x)  \
		(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem +	\
		((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))

#define CRYPTO_REG_SIZE 4
#define NUM_OF_CRYPTO_AUTH_IV_REG 16
#define NUM_OF_CRYPTO_CNTR_IV_REG 4
#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
/* Result dump is padded to the BAM burst size. */
#define CRYPTO_RESULT_DUMP_SIZE   \
	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
	QCEBAM_BURST_SIZE)

/* QCE max number of descriptor in a descriptor list */
#define QCE_MAX_NUM_DESC    128
#define SPS_MAX_PKT_SIZE    (32 * 1024  - 64)

/* default bam ipc log level */
#define QCE_BAM_DEFAULT_IPC_LOGLVL 2

/* State of consumer/producer Pipe */
enum qce_pipe_st_enum {
	QCE_PIPE_STATE_IDLE = 0,
	QCE_PIPE_STATE_IN_PROG = 1,
	QCE_PIPE_STATE_COMP = 2,
	QCE_PIPE_STATE_LAST
};

/* Kind of transfer a request performs (hash, cipher, aead, OTA f8/f9). */
enum qce_xfer_type_enum {
	QCE_XFER_HASHING,
	QCE_XFER_CIPHERING,
	QCE_XFER_AEAD,
	QCE_XFER_F8,
	QCE_XFER_F9,
	QCE_XFER_TYPE_LAST
};
/* One SPS pipe endpoint: pipe handle, connection and event config. */
struct qce_sps_ep_conn_data {
	struct sps_pipe			*pipe;
	struct sps_connect		connect;
	struct sps_register_event	event;
};

/* CE Result DUMP format*/
struct ce_result_dump_format {
	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
	__be32 status;
	__be32 status2;
};
/*
 * One pre-built SPS command list plus direct pointers to the command
 * elements that must be patched per request (keys, IVs, sizes, go-proc).
 */
struct qce_cmdlist_info {

	unsigned long cmdlist;
	struct sps_command_element *crypto_cfg;
	struct sps_command_element *encr_seg_cfg;
	struct sps_command_element *encr_seg_size;
	struct sps_command_element *encr_seg_start;
	struct sps_command_element *encr_key;
	struct sps_command_element *encr_xts_key;
	struct sps_command_element *encr_cntr_iv;
	struct sps_command_element *encr_ccm_cntr_iv;
	struct sps_command_element *encr_mask;
	struct sps_command_element *encr_xts_du_size;

	struct sps_command_element *auth_seg_cfg;
	struct sps_command_element *auth_seg_size;
	struct sps_command_element *auth_seg_start;
	struct sps_command_element *auth_key;
	struct sps_command_element *auth_iv;
	struct sps_command_element *auth_nonce_info;
	struct sps_command_element *auth_bytecount;
	struct sps_command_element *seg_size;
	struct sps_command_element *go_proc;
	ptrdiff_t size;		/* total size of this command list */
};
/* Pre-built command lists, one per supported algorithm/mode/key-size. */
struct qce_cmdlistptr_ops {
	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
	struct qce_cmdlist_info cipher_aes_128_ecb;
	struct qce_cmdlist_info cipher_aes_256_ecb;
	struct qce_cmdlist_info cipher_aes_128_xts;
	struct qce_cmdlist_info cipher_aes_256_xts;
	struct qce_cmdlist_info cipher_des_cbc;
	struct qce_cmdlist_info cipher_des_ecb;
	struct qce_cmdlist_info cipher_3des_cbc;
	struct qce_cmdlist_info cipher_3des_ecb;
	struct qce_cmdlist_info auth_sha1;
	struct qce_cmdlist_info auth_sha256;
	struct qce_cmdlist_info auth_sha1_hmac;
	struct qce_cmdlist_info auth_sha256_hmac;
	struct qce_cmdlist_info auth_aes_128_cmac;
	struct qce_cmdlist_info auth_aes_256_cmac;
	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
	struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
	struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
	struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
	struct qce_cmdlist_info aead_aes_128_ccm;
	struct qce_cmdlist_info aead_aes_256_ccm;
	struct qce_cmdlist_info cipher_null;
	struct qce_cmdlist_info f8_kasumi;
	struct qce_cmdlist_info f8_snow3g;
	struct qce_cmdlist_info f9_kasumi;
	struct qce_cmdlist_info f9_snow3g;
	struct qce_cmdlist_info unlock_all_pipes;
};
/* Cached register values for each algorithm's config registers. */
struct qce_ce_cfg_reg_setting {
	uint32_t crypto_cfg_be;
	uint32_t crypto_cfg_le;

	uint32_t encr_cfg_aes_cbc_128;
	uint32_t encr_cfg_aes_cbc_256;

	uint32_t encr_cfg_aes_ecb_128;
	uint32_t encr_cfg_aes_ecb_256;

	uint32_t encr_cfg_aes_xts_128;
	uint32_t encr_cfg_aes_xts_256;

	uint32_t encr_cfg_aes_ctr_128;
	uint32_t encr_cfg_aes_ctr_256;

	uint32_t encr_cfg_aes_ccm_128;
	uint32_t encr_cfg_aes_ccm_256;

	uint32_t encr_cfg_des_cbc;
	uint32_t encr_cfg_des_ecb;

	uint32_t encr_cfg_3des_cbc;
	uint32_t encr_cfg_3des_ecb;
	uint32_t encr_cfg_kasumi;
	uint32_t encr_cfg_snow3g;

	uint32_t auth_cfg_cmac_128;
	uint32_t auth_cfg_cmac_256;

	uint32_t auth_cfg_sha1;
	uint32_t auth_cfg_sha256;

	uint32_t auth_cfg_hmac_sha1;
	uint32_t auth_cfg_hmac_sha256;

	uint32_t auth_cfg_aes_ccm_128;
	uint32_t auth_cfg_aes_ccm_256;
	uint32_t auth_cfg_aead_sha1_hmac;
	uint32_t auth_cfg_aead_sha256_hmac;
	uint32_t auth_cfg_kasumi;
	uint32_t auth_cfg_snow3g;
};
/* BAM (bus access manager) configuration and pipe indices for one CE. */
struct ce_bam_info {
	uint32_t			bam_irq;
	uint32_t			bam_mem;
	void __iomem			*bam_iobase;
	uint32_t			ce_device;
	uint32_t			ce_hw_instance;
	uint32_t			bam_ee;
	unsigned int			pipe_pair_index;
	unsigned int			src_pipe_index;
	unsigned int			dest_pipe_index;
	unsigned long			bam_handle;
	int				ce_burst_size;
	uint32_t			minor_version;
	struct qce_sps_ep_conn_data	producer;
	struct qce_sps_ep_conn_data	consumer;
};
/* SPS data structure with buffers, commandlists & command pointer lists */
struct ce_sps_data {
	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
	int consumer_status;		/* consumer pipe status */
	int producer_status;		/* producer pipe status */
	struct sps_transfer in_transfer;
	struct sps_transfer out_transfer;
	struct qce_cmdlistptr_ops cmdlistptr;
	uint32_t result_dump; /* result dump virtual address */
	uint32_t result_dump_null;
	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
	uint32_t result_dump_null_phy;

	uint32_t ignore_buffer; /* ignore buffer virtual address */
	struct ce_result_dump_format *result; /* pointer to result dump */
	struct ce_result_dump_format *result_null;
};
/*
 * Per-request bookkeeping: in_use gates slot allocation, and the DMA
 * fields record mappings that must be torn down at completion.
 */
struct ce_request_info {
	atomic_t in_use;		/* slot claimed flag */
	bool in_prog;			/* request submitted to hardware */
	enum qce_xfer_type_enum	xfer_type;
	struct ce_sps_data ce_sps;
	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
	void *user;
	void *areq;			/* caller cookie passed back to qce_cb */
	int assoc_nents;
	struct scatterlist *asg;	/* Formatted associated data sg */
	int src_nents;
	int dst_nents;
	dma_addr_t phy_iv_in;
	unsigned char dec_iv[16];
	int dir;
	enum qce_cipher_mode_enum mode;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	unsigned int req_len;
};

/* Aggregate driver counters exposed for debugging. */
struct qce_driver_stats {
	int no_of_timeouts;
	int no_of_dummy_reqs;
	/* current mode */
	int current_mode;
	/* maximum request outstanding */
	int outstanding_reqs;
};
#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H */

22
crypto-qti/qce_ota.h Normal file
View File

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver OTA API
*
* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_OTA_H
#define __CRYPTO_MSM_QCE_OTA_H
#include <linux/platform_device.h>
#include "linux/qcota.h"
/*
 * OTA (over-the-air) crypto API: KASUMI/SNOW3G F8 ciphering and F9
 * integrity requests.  `handle` comes from qce_open(); `cookie` is
 * returned to the caller through `qce_cb` on completion.
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
	void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
	void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f9_req(void *handle, struct qce_f9_req *req,
	void *cookie, qce_comp_func_ptr_t qce_cb);
#endif /* __CRYPTO_MSM_QCE_OTA_H */

2330
crypto-qti/qcedev.c Normal file

File diff suppressed because it is too large Load Diff

440
crypto-qti/qcedev_smmu.c Normal file
View File

@@ -0,0 +1,440 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qti (or) Qualcomm Technologies Inc CE device driver.
*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/list.h>
#include "linux/qcedev.h"
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
#include <linux/mem-buf.h>
/* Allocate dev->dma_parms on first use (devm-managed, freed with dev). */
static int qcedev_alloc_dma_parms(struct device *dev)
{
	if (dev->dma_parms)
		return 0;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
			GFP_KERNEL);
	return dev->dma_parms ? 0 : -ENOMEM;
}

/*
 * qcedev_setup_context_bank() - bind a context bank to its device and
 * configure DMA segment limits (4 GiB max segment, 64-bit boundary).
 */
static int qcedev_setup_context_bank(struct context_bank_info *cb,
				struct device *dev)
{
	int rc;

	if (!dev || !cb) {
		pr_err("%s err: invalid input params\n", __func__);
		return -EINVAL;
	}

	cb->dev = dev;
	rc = qcedev_alloc_dma_parms(dev);
	if (rc)
		return rc;

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
	return 0;
}
/*
 * qcedev_parse_context_bank() - parse one context-bank child node and
 * attach it to the parent qcedev device's context_banks list.
 *
 * Reads the optional "label" property and the "qcom,secure-context-bank"
 * flag, then configures DMA parameters for the bank's device.  Returns 0
 * on success or a negative errno.
 *
 * Fix: the parent drvdata (podev) was dereferenced via list_add_tail()
 * without a NULL check; if the parent has no drvdata this crashed.  Now
 * validated up front like the other inputs.
 */
int qcedev_parse_context_bank(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;
	int rc = 0;

	if (!pdev) {
		pr_err("%s err: invalid platform devices\n", __func__);
		return -EINVAL;
	}
	if (!pdev->dev.parent) {
		pr_err("%s err: failed to find a parent for %s\n",
			__func__, dev_name(&pdev->dev));
		return -EINVAL;
	}

	podev = dev_get_drvdata(pdev->dev.parent);
	if (!podev) {
		pr_err("%s err: failed to find driver data for %s\n",
			__func__, dev_name(pdev->dev.parent));
		return -EINVAL;
	}
	np = pdev->dev.of_node;
	cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		pr_err("%s ERROR = Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	/* Link first so lookup helpers can see the bank; unlinked on error. */
	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &podev->context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc)
		pr_debug("%s ERROR = Unable to read label\n", __func__);

	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");

	rc = qcedev_setup_context_bank(cb, &pdev->dev);
	if (rc) {
		pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
		goto err_setup_cb;
	}

	return 0;

err_setup_cb:
	list_del(&cb->list);
	devm_kfree(&pdev->dev, cb);
	return rc;
}
/*
 * qcedev_mem_new_client() - allocate a memory client descriptor.
 * Only MEM_ION is supported; returns NULL on bad type or allocation
 * failure.
 */
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
	struct qcedev_mem_client *client;

	if (mtype != MEM_ION) {
		pr_err("%s: err: Mem type not supported\n", __func__);
		return NULL;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->mtype = mtype;
	return client;
}
/* Free a client from qcedev_mem_new_client(); NULL is a harmless no-op. */
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
	kfree(mem_client);
}

/* An IOMMU is considered present iff any context bank was parsed. */
static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
	return !list_empty(&qce_hndl->cntl->context_banks);
}
/*
 * get_context_bank() - find the first context bank whose security
 * attribute matches is_secure, or NULL if none exists.
 */
static struct context_bank_info *get_context_bank(
		struct qcedev_handle *qce_hndl, bool is_secure)
{
	struct context_bank_info *cb;

	list_for_each_entry(cb, &qce_hndl->cntl->context_banks, list) {
		if (cb->is_secure == is_secure)
			return cb;
	}
	return NULL;
}
/*
 * ion_map_buffer() - attach and DMA-map a dma-buf fd into the matching
 * context bank, recording the mapping in binfo->ion_buf.
 *
 * Picks the secure or non-secure bank based on whether the buffer is
 * exclusively owned (mem_buf_dma_buf_exclusive_owner).  Fails with -EIO
 * if no IOMMU/context bank is available, -EINVAL for a bad fd, -ENOMEM
 * for mapping problems.  On failure every step taken so far is unwound.
 */
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	int rc = 0;
	struct dma_buf *buf = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf))
		return -EINVAL;

	if (is_iommu_present(qce_hndl)) {
		/* Exclusively-owned buffers map to the non-secure bank. */
		cb = get_context_bank(qce_hndl, !mem_buf_dma_buf_exclusive_owner(buf));
		if (!cb) {
			pr_err("%s: err: failed to get context bank info\n",
				__func__);
			rc = -EIO;
			goto map_err;
		}

		/* Prepare a dma buf for dma on the given device */
		attach = dma_buf_attach(buf, cb->dev);
		if (IS_ERR_OR_NULL(attach)) {
			rc = PTR_ERR(attach) ?: -ENOMEM;
			pr_err("%s: err: failed to attach dmabuf\n", __func__);
			goto map_err;
		}

		/* Get the scatterlist for the given attachment */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(table)) {
			rc = PTR_ERR(table) ?: -ENOMEM;
			pr_err("%s: err: failed to map table\n", __func__);
			goto map_table_err;
		}

		/* The IOVA/size are taken from the first (only) sg entry;
		 * a mapping smaller than the advertised fd size is rejected.
		 */
		if (table->sgl) {
			binfo->ion_buf.iova = sg_dma_address(table->sgl);
			binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
			if (binfo->ion_buf.mapped_buf_size < fd_size) {
				pr_err("%s: err: mapping failed, size mismatch\n",
						__func__);
				rc = -ENOMEM;
				goto map_sg_err;
			}
		} else {
			pr_err("%s: err: sg list is NULL\n", __func__);
			rc = -ENOMEM;
			goto map_sg_err;
		}

		binfo->ion_buf.mapping_info.dev = cb->dev;
		binfo->ion_buf.mapping_info.mapping = cb->mapping;
		binfo->ion_buf.mapping_info.table = table;
		binfo->ion_buf.mapping_info.attach = attach;
		binfo->ion_buf.mapping_info.buf = buf;
		binfo->ion_buf.ion_fd = fd;
	} else {
		pr_err("%s: err: smmu not enabled\n", __func__);
		rc = -EIO;
		goto map_err;
	}

	return 0;

map_sg_err:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
	dma_buf_detach(buf, attach);
map_err:
	dma_buf_put(buf);
	return rc;
}
/*
 * ion_unmap_buffer() - undo ion_map_buffer(): unmap the attachment,
 * detach and drop the dma-buf reference.  A no-op when no IOMMU is
 * present.  Always returns 0.
 */
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_reg_buf_info *binfo)
{
	struct dma_mapping_info *mi = &binfo->ion_buf.mapping_info;

	if (!is_iommu_present(qce_hndl))
		return 0;

	dma_buf_unmap_attachment(mi->attach, mi->table, DMA_BIDIRECTIONAL);
	dma_buf_detach(mi->buf, mi->attach);
	dma_buf_put(mi->buf);
	return 0;
}
/*
 * qcedev_map_buffer() - dispatch a map request to the backend for the
 * client's memory type.  Only MEM_ION is implemented.
 */
static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	int rc;

	if (mem_client->mtype == MEM_ION) {
		rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
	} else {
		pr_err("%s: err: Mem type not supported\n", __func__);
		rc = -1;
	}

	if (rc)
		pr_err("%s: err: failed to map buffer\n", __func__);

	return rc;
}
/*
 * qcedev_unmap_buffer() - dispatch an unmap request to the backend for
 * the client's memory type.  Only MEM_ION is implemented.
 */
static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client,
		struct qcedev_reg_buf_info *binfo)
{
	int rc;

	if (mem_client->mtype == MEM_ION) {
		rc = ion_unmap_buffer(qce_hndl, binfo);
	} else {
		pr_err("%s: err: Mem type not supported\n", __func__);
		rc = -1;
	}

	if (rc)
		pr_err("%s: err: failed to unmap buffer\n", __func__);

	return rc;
}
/*
 * qcedev_check_and_map_buffer() - return the device IOVA for fd+offset,
 * mapping the fd first if it is not already registered.
 *
 * A hit on the registered-buffer list bumps its refcount; a miss maps
 * the fd, records it on the list with refcount 1, and returns the new
 * IOVA.  Returns 0 with *vaddr set, or a negative errno.
 *
 * Fixes on the offset-out-of-range error path:
 *  - a freshly mapped binfo was unmapped and kfree()d while still
 *    linked on registeredbufs.list, leaving a dangling list entry
 *    (use-after-free on the next traversal); it is now unlinked under
 *    the list lock before being unmapped/freed.
 *  - for an already-registered fd, the atomic_inc() taken during lookup
 *    was never undone, leaking a reference; it is now dropped.
 */
int qcedev_check_and_map_buffer(void *handle,
		int fd, unsigned int offset, unsigned int fd_size,
		unsigned long long *vaddr)
{
	bool found = false;
	struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	int rc = 0;
	unsigned long mapped_size = 0;

	if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is already mapped */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
		if (temp->ion_buf.ion_fd == fd) {
			found = true;
			*vaddr = temp->ion_buf.iova;
			mapped_size = temp->ion_buf.mapped_buf_size;
			atomic_inc(&temp->ref_count);
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	/* If buffer fd is not mapped then create a fresh mapping */
	if (!found) {
		pr_debug("%s: info: ion fd not registered with driver\n",
			__func__);
		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
		if (!binfo) {
			pr_err("%s: err: failed to allocate binfo\n",
				__func__);
			rc = -ENOMEM;
			goto error;
		}
		rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
							fd_size, binfo);
		if (rc) {
			pr_err("%s: err: failed to map fd (%d) error = %d\n",
				__func__, fd, rc);
			goto error;
		}

		*vaddr = binfo->ion_buf.iova;
		mapped_size = binfo->ion_buf.mapped_buf_size;
		atomic_inc(&binfo->ref_count);

		/* Add buffer mapping information to regd buffer list */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
	}

	/* Make sure the offset is within the mapped range */
	if (offset >= mapped_size) {
		pr_err(
			"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
			__func__, offset, mapped_size, fd);
		rc = -ERANGE;
		goto unmap;
	}

	/* return the mapped virtual address adjusted by offset */
	*vaddr += offset;

	return 0;

unmap:
	if (!found) {
		/* Unlink the entry we just added before freeing it. */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_del(&binfo->list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
		qcedev_unmap_buffer(handle, mem_client, binfo);
	} else {
		/* Drop the reference taken during the lookup above. */
		atomic_dec(&temp->ref_count);
	}

error:
	kfree(binfo);
	return rc;
}
/*
 * qcedev_check_and_unmap_buffer() - drop one reference on a registered
 * fd; when the count reaches zero the buffer is unmapped, unlinked and
 * freed.  Returns 0 on success, -EINVAL for bad args or an unknown fd,
 * -EPERM for a non-ION client.
 */
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
	struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	bool found = false;

	if (!handle || fd < 0) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is mapped and present in the regd list. */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry_safe(binfo, dummy,
		&qce_hndl->registeredbufs.list, list) {

		if (binfo->ion_buf.ion_fd == fd) {
			found = true;
			atomic_dec(&binfo->ref_count);

			/* Unmap only if there are no more references */
			if (atomic_read(&binfo->ref_count) == 0) {
				qcedev_unmap_buffer(qce_hndl,
					mem_client, binfo);
				list_del(&binfo->list);
				kfree(binfo);
			}
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	if (!found) {
		pr_err("%s: err: calling unmap on unknown fd %d\n",
			__func__, fd);
		return -EINVAL;
	}

	return 0;
}
int qcedev_unmap_all_buffers(void *handle)
{
struct qcedev_reg_buf_info *binfo = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
struct list_head *pos;
if (!handle) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
mutex_lock(&qce_hndl->registeredbufs.lock);
while (!list_empty(&qce_hndl->registeredbufs.list)) {
pos = qce_hndl->registeredbufs.list.next;
binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
if (binfo)
qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
list_del(pos);
kfree(binfo);
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
return 0;
}

82
crypto-qti/qcedev_smmu.h Normal file
View File

@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Qti (or) Qualcomm Technologies Inc CE device driver.
*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_PARSE_H_
#define _DRIVERS_CRYPTO_PARSE_H_
#include <linux/dma-iommu.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/types.h>
/* One IOMMU context bank parsed from DT; linked on podev->context_banks. */
struct context_bank_info {
	struct list_head list;
	const char *name;		/* DT "label" property (optional) */
	u32 buffer_type;
	u32 start_addr;
	u32 size;
	bool is_secure;		/* DT "qcom,secure-context-bank" flag */
	struct device *dev;
	struct dma_iommu_mapping *mapping;
};

/* Supported backing-memory types; currently ION only. */
enum qcedev_mem_type {
	MEM_ION,
};

/* Memory client handle returned by qcedev_mem_new_client(). */
struct qcedev_mem_client {
	enum qcedev_mem_type mtype;
};
/* Everything needed to tear down one dma-buf attachment/mapping. */
struct dma_mapping_info {
	struct device *dev;
	struct dma_iommu_mapping *mapping;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	struct dma_buf *buf;
};

/* Result of mapping an ION fd: IOVA, mapped size, and teardown info. */
struct qcedev_ion_buf_info {
	struct dma_mapping_info mapping_info;
	dma_addr_t iova;
	unsigned long mapped_buf_size;
	int ion_fd;
};

/* Refcounted registered-buffer entry on a handle's registeredbufs list. */
struct qcedev_reg_buf_info {
	struct list_head list;
	union {
		struct qcedev_ion_buf_info ion_buf;
	};
	atomic_t ref_count;
};

/* Mutex-protected list head for a handle's registered buffers. */
struct qcedev_buffer_list {
	struct list_head list;
	struct mutex lock;
};
/*
 * SMMU buffer-mapping API.  check_and_map returns the IOVA for fd+offset
 * (mapping on first use, refcounting thereafter); check_and_unmap drops
 * one reference; unmap_all force-releases everything at handle teardown.
 */
int qcedev_parse_context_bank(struct platform_device *pdev);
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype);
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client);
int qcedev_check_and_map_buffer(void *qce_hndl,
		int fd, unsigned int offset, unsigned int fd_size,
		unsigned long long *vaddr);
int qcedev_check_and_unmap_buffer(void *handle, int fd);
int qcedev_unmap_all_buffers(void *handle);

extern struct qcedev_reg_buf_info *global_binfo_in;
extern struct qcedev_reg_buf_info *global_binfo_out;
extern struct qcedev_reg_buf_info *global_binfo_res;
#endif

126
crypto-qti/qcedevi.h Normal file
View File

@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI crypto Driver
*
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCEDEVI_H
#define __CRYPTO_MSM_QCEDEVI_H
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <crypto/hash.h>
#include "linux/platform_data/qcom_crypto_device.h"
#include "linux/fips_status.h"
#include "qce.h"
#include "qcedev_smmu.h"
#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

/* Which kind of operation an async request performs. */
enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_handle;

/* Cipher request plus the cookie returned through the completion cb. */
struct qcedev_cipher_req {
	struct skcipher_request creq;
	void *cookie;
};

/* Hash request plus the cookie returned through the completion cb. */
struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

/* Per-handle SHA state carried across partial-update ioctls. */
struct	qcedev_sha_ctxt {
	uint32_t	auth_data[4];
	uint8_t		digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t	diglen;
	uint8_t		trailing_buf[64];	/* residue < one block */
	uint32_t	trailing_buf_len;
	uint8_t		first_blk;
	uint8_t		last_blk;
	uint8_t		authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
	bool		init_done;
};
/*
 * One queued userspace operation: the ioctl parameters (cipher or sha
 * op_req), the backing crypto request, and a completion the submitting
 * thread waits on.  `err` carries the operation's result code.
 */
struct qcedev_async_req {
	struct list_head			list;
	struct completion			complete;
	enum qcedev_crypto_oper_type		op_type;
	union {
		struct qcedev_cipher_op_req	cipher_op_req;
		struct qcedev_sha_op_req	sha_op_req;
	};

	union {
		struct qcedev_cipher_req	cipher_req;
		struct qcedev_sha_req		sha_req;
	};
	struct qcedev_handle			*handle;
	int					err;
};
/**********************************************************************
 * Register ourselves as a char device to be able to access the dev driver
 * from userspace.
 */

#define QCEDEV_DEV	"qce"

/* Per-device driver state shared by all open handles. */
struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;
	uint32_t high_bw_req_count;

	/* CE features/algorithms supported by HW engine*/
	struct ce_hw_support ce_support;

	/* replaced msm_bus with interconnect path */
	struct icc_path *icc_path;

	/* char device */
	struct cdev cdev;

	int minor;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned int magic;

	/* Pending requests and the one currently on the engine. */
	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
	struct list_head context_banks;
	struct qcedev_mem_client *mem_client;
};
/* Per-open-file state: control link, SHA context, mapped buffers. */
struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context*/
	struct qcedev_sha_ctxt sha_ctxt;
	/* qcedev mapped buffer list */
	struct qcedev_buffer_list registeredbufs;
};

/* Completion callbacks passed to the QCE layer for async requests. */
void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret);
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret);
#endif /* __CRYPTO_MSM_QCEDEVI_H */

5495
crypto-qti/qcrypto.c Normal file

File diff suppressed because it is too large Load Diff

521
crypto-qti/qcryptohw_50.h Normal file
View File

@@ -0,0 +1,521 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 */
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_

/*
 * QTI Crypto Engine 5.0 register offsets (relative to the CE base) and
 * bit-field definitions. Values are fixed by the hardware register map;
 * do not reorder or "normalise" them.
 */
#define CRYPTO_BAM_CNFG_BITS_REG		0x0007C
#define CRYPTO_BAM_CD_ENABLE			27
#define CRYPTO_BAM_CD_ENABLE_MASK		(1 << CRYPTO_BAM_CD_ENABLE)

#define QCE_AUTH_REG_BYTE_COUNT 4
#define CRYPTO_VERSION_REG			0x1A000

/* Data in/out FIFO windows */
#define CRYPTO_DATA_IN0_REG			0x1A010
#define CRYPTO_DATA_IN1_REG			0x1A014
#define CRYPTO_DATA_IN2_REG			0x1A018
#define CRYPTO_DATA_IN3_REG			0x1A01C
#define CRYPTO_DATA_OUT0_REG			0x1A020
#define CRYPTO_DATA_OUT1_REG			0x1A024
#define CRYPTO_DATA_OUT2_REG			0x1A028
#define CRYPTO_DATA_OUT3_REG			0x1A02C

#define CRYPTO_STATUS_REG			0x1A100
#define CRYPTO_STATUS2_REG			0x1A104
#define CRYPTO_ENGINES_AVAIL			0x1A108
#define CRYPTO_FIFO_SIZES_REG			0x1A10C
#define CRYPTO_SEG_SIZE_REG			0x1A110
#define CRYPTO_GOPROC_REG			0x1A120
#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000

/* Encryption segment configuration */
#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
#define CRYPTO_ENCR_SEG_START_REG		0x1A208

/* Encryption key registers (register-key mode) */
#define CRYPTO_ENCR_KEY0_REG			0x1D000
#define CRYPTO_ENCR_KEY1_REG			0x1D004
#define CRYPTO_ENCR_KEY2_REG			0x1D008
#define CRYPTO_ENCR_KEY3_REG			0x1D00C
#define CRYPTO_ENCR_KEY4_REG			0x1D010
#define CRYPTO_ENCR_KEY5_REG			0x1D014
#define CRYPTO_ENCR_KEY6_REG			0x1D018
#define CRYPTO_ENCR_KEY7_REG			0x1D01C

#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C

/* Per-pipe encryption key registers (pipe-key mode, pipes 0-3) */
#define CRYPTO_ENCR_PIPE0_KEY0_REG		0x1E000
#define CRYPTO_ENCR_PIPE0_KEY1_REG		0x1E004
#define CRYPTO_ENCR_PIPE0_KEY2_REG		0x1E008
#define CRYPTO_ENCR_PIPE0_KEY3_REG		0x1E00C
#define CRYPTO_ENCR_PIPE0_KEY4_REG		0x1E010
#define CRYPTO_ENCR_PIPE0_KEY5_REG		0x1E014
#define CRYPTO_ENCR_PIPE0_KEY6_REG		0x1E018
#define CRYPTO_ENCR_PIPE0_KEY7_REG		0x1E01C
#define CRYPTO_ENCR_PIPE1_KEY0_REG		0x1E020
#define CRYPTO_ENCR_PIPE1_KEY1_REG		0x1E024
#define CRYPTO_ENCR_PIPE1_KEY2_REG		0x1E028
#define CRYPTO_ENCR_PIPE1_KEY3_REG		0x1E02C
#define CRYPTO_ENCR_PIPE1_KEY4_REG		0x1E030
#define CRYPTO_ENCR_PIPE1_KEY5_REG		0x1E034
#define CRYPTO_ENCR_PIPE1_KEY6_REG		0x1E038
#define CRYPTO_ENCR_PIPE1_KEY7_REG		0x1E03C
#define CRYPTO_ENCR_PIPE2_KEY0_REG		0x1E040
#define CRYPTO_ENCR_PIPE2_KEY1_REG		0x1E044
#define CRYPTO_ENCR_PIPE2_KEY2_REG		0x1E048
#define CRYPTO_ENCR_PIPE2_KEY3_REG		0x1E04C
#define CRYPTO_ENCR_PIPE2_KEY4_REG		0x1E050
#define CRYPTO_ENCR_PIPE2_KEY5_REG		0x1E054
#define CRYPTO_ENCR_PIPE2_KEY6_REG		0x1E058
#define CRYPTO_ENCR_PIPE2_KEY7_REG		0x1E05C
#define CRYPTO_ENCR_PIPE3_KEY0_REG		0x1E060
#define CRYPTO_ENCR_PIPE3_KEY1_REG		0x1E064
#define CRYPTO_ENCR_PIPE3_KEY2_REG		0x1E068
#define CRYPTO_ENCR_PIPE3_KEY3_REG		0x1E06C
#define CRYPTO_ENCR_PIPE3_KEY4_REG		0x1E070
#define CRYPTO_ENCR_PIPE3_KEY5_REG		0x1E074
#define CRYPTO_ENCR_PIPE3_KEY6_REG		0x1E078
#define CRYPTO_ENCR_PIPE3_KEY7_REG		0x1E07C

/* Per-pipe XTS tweak key registers (pipes 0-3) */
#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG		0x1E200
#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG		0x1E204
#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG		0x1E208
#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG		0x1E20C
#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG		0x1E210
#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG		0x1E214
#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG		0x1E218
#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG		0x1E21C
#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG		0x1E220
#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG		0x1E224
#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG		0x1E228
#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG		0x1E22C
#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG		0x1E230
#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG		0x1E234
#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG		0x1E238
#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG		0x1E23C
#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG		0x1E240
#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG		0x1E244
#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG		0x1E248
#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG		0x1E24C
#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG		0x1E250
#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG		0x1E254
#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG		0x1E258
#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG		0x1E25C
#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG		0x1E260
#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG		0x1E264
#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG		0x1E268
#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG		0x1E26C
#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG		0x1E270
#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG		0x1E274
#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG		0x1E278
#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG		0x1E27C

/* Counter/IV registers */
#define CRYPTO_CNTR0_IV0_REG			0x1A20C
#define CRYPTO_CNTR1_IV1_REG			0x1A210
#define CRYPTO_CNTR2_IV2_REG			0x1A214
#define CRYPTO_CNTR3_IV3_REG			0x1A218
#define CRYPTO_CNTR_MASK_REG0			0x1A23C
#define CRYPTO_CNTR_MASK_REG1			0x1A238
#define CRYPTO_CNTR_MASK_REG2			0x1A234
#define CRYPTO_CNTR_MASK_REG			0x1A21C
#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230

/* Authentication segment configuration */
#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
#define CRYPTO_AUTH_SEG_START_REG		0x1A308

/* Authentication key registers (register-key mode) */
#define CRYPTO_AUTH_KEY0_REG			0x1D040
#define CRYPTO_AUTH_KEY1_REG			0x1D044
#define CRYPTO_AUTH_KEY2_REG			0x1D048
#define CRYPTO_AUTH_KEY3_REG			0x1D04C
#define CRYPTO_AUTH_KEY4_REG			0x1D050
#define CRYPTO_AUTH_KEY5_REG			0x1D054
#define CRYPTO_AUTH_KEY6_REG			0x1D058
#define CRYPTO_AUTH_KEY7_REG			0x1D05C
#define CRYPTO_AUTH_KEY8_REG			0x1D060
#define CRYPTO_AUTH_KEY9_REG			0x1D064
#define CRYPTO_AUTH_KEY10_REG			0x1D068
#define CRYPTO_AUTH_KEY11_REG			0x1D06C
#define CRYPTO_AUTH_KEY12_REG			0x1D070
#define CRYPTO_AUTH_KEY13_REG			0x1D074
#define CRYPTO_AUTH_KEY14_REG			0x1D078
#define CRYPTO_AUTH_KEY15_REG			0x1D07C

/* Per-pipe authentication key registers (pipes 0-3) */
#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E880
#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E884
#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E888
#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E88C
#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E890
#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E894
#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E898
#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E89C
#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E8A0
#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E8A4
#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E8A8
#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E8AC
#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E8B0
#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E8B4
#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E8B8
#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E8BC
#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E900
#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E904
#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E908
#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E90C
#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E910
#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E914
#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E918
#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E91C
#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E920
#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E924
#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E928
#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E92C
#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E930
#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E934
#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E938
#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E93C
#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E980
#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E984
#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E988
#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E98C
#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E990
#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E994
#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E998
#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E99C
#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E9A0
#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E9A4
#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E9A8
#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E9AC
#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E9B0
#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E9B4
#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E9B8
#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E9BC

/* Authentication IV and nonce registers */
#define CRYPTO_AUTH_IV0_REG			0x1A310
#define CRYPTO_AUTH_IV1_REG			0x1A314
#define CRYPTO_AUTH_IV2_REG			0x1A318
#define CRYPTO_AUTH_IV3_REG			0x1A31C
#define CRYPTO_AUTH_IV4_REG			0x1A320
#define CRYPTO_AUTH_IV5_REG			0x1A324
#define CRYPTO_AUTH_IV6_REG			0x1A328
#define CRYPTO_AUTH_IV7_REG			0x1A32C
#define CRYPTO_AUTH_IV8_REG			0x1A330
#define CRYPTO_AUTH_IV9_REG			0x1A334
#define CRYPTO_AUTH_IV10_REG			0x1A338
#define CRYPTO_AUTH_IV11_REG			0x1A33C
#define CRYPTO_AUTH_IV12_REG			0x1A340
#define CRYPTO_AUTH_IV13_REG			0x1A344
#define CRYPTO_AUTH_IV14_REG			0x1A348
#define CRYPTO_AUTH_IV15_REG			0x1A34C
#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC

#define CRYPTO_CONFIG_REG			0x1A400
#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
#define CRYPTO_DEBUG_REG			0x1AF04

/* Register bits */
#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */

/* status reg */
#define CRYPTO_MAC_FAILED			31
#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
#define CRYPTO_HSD_ERR				20
#define CRYPTO_ACCESS_VIOL			19
#define CRYPTO_PIPE_ACTIVE_ERR			18
#define CRYPTO_CFG_CHNG_ERR			17
#define CRYPTO_DOUT_ERR				16
#define CRYPTO_DIN_ERR				15
#define CRYPTO_AXI_ERR				14
#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
#define CRYPTO_ENCR_BUSY			9
#define CRYPTO_AUTH_BUSY			8
#define CRYPTO_DOUT_INTR			7
#define CRYPTO_DIN_INTR				6
#define CRYPTO_OP_DONE_INTR			5
#define CRYPTO_ERR_INTR				4
#define CRYPTO_DOUT_RDY				3
#define CRYPTO_DIN_RDY				2
#define CRYPTO_OPERATION_DONE			1
#define CRYPTO_SW_ERR				0

/* status2 reg */
#define CRYPTO_AXI_EXTRA			1
#define CRYPTO_LOCKED				2

/* config reg */
#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
#define CRYPTO_REQ_SIZE_ENUM_1_BEAT		0
#define CRYPTO_REQ_SIZE_ENUM_2_BEAT		1
#define CRYPTO_REQ_SIZE_ENUM_3_BEAT		2
#define CRYPTO_REQ_SIZE_ENUM_4_BEAT		3
#define CRYPTO_REQ_SIZE_ENUM_5_BEAT		4
#define CRYPTO_REQ_SIZE_ENUM_6_BEAT		5
#define CRYPTO_REQ_SIZE_ENUM_7_BEAT		6
#define CRYPTO_REQ_SIZE_ENUM_8_BEAT		7
#define CRYPTO_REQ_SIZE_ENUM_9_BEAT		8
#define CRYPTO_REQ_SIZE_ENUM_10_BEAT		9
#define CRYPTO_REQ_SIZE_ENUM_11_BEAT		10
#define CRYPTO_REQ_SIZE_ENUM_12_BEAT		11
#define CRYPTO_REQ_SIZE_ENUM_13_BEAT		12
#define CRYPTO_REQ_SIZE_ENUM_14_BEAT		13
#define CRYPTO_REQ_SIZE_ENUM_15_BEAT		14
#define CRYPTO_REQ_SIZE_ENUM_16_BEAT		15
#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
#define CRYPTO_ENUM_1_QUEUED_REQS		0
#define CRYPTO_ENUM_2_QUEUED_REQS		1
#define CRYPTO_ENUM_3_QUEUED_REQS		2
#define CRYPTO_IRQ_ENABLES			10 /* bit 13-10 */
#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
#define CRYPTO_LITTLE_ENDIAN_MODE		9
#define CRYPTO_LITTLE_ENDIAN_MASK		(1 << CRYPTO_LITTLE_ENDIAN_MODE)
#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
#define CRYPTO_HIGH_SPD_EN_N			4
#define CRYPTO_MASK_DOUT_INTR			3
#define CRYPTO_MASK_DIN_INTR			2
#define CRYPTO_MASK_OP_DONE_INTR		1
#define CRYPTO_MASK_ERR_INTR			0

/* auth_seg_cfg reg */
#define CRYPTO_COMP_EXP_MAC			24
#define CRYPTO_COMP_EXP_MAC_DISABLED		0
#define CRYPTO_COMP_EXP_MAC_ENABLED		1
#define CRYPTO_F9_DIRECTION			23
#define CRYPTO_F9_DIRECTION_UPLINK		0
#define CRYPTO_F9_DIRECTION_DOWNLINK		1
#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
#define CRYPTO_USE_PIPE_KEY_AUTH		19
#define CRYPTO_USE_HW_KEY_AUTH			18
#define CRYPTO_FIRST				17
#define CRYPTO_LAST				16
#define CRYPTO_AUTH_POS				14 /* bit 15 .. 14*/
#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
#define CRYPTO_AUTH_POS_BEFORE			0
#define CRYPTO_AUTH_POS_AFTER			1
#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9*/
#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
#define CRYPTO_AUTH_SIZE_SHA1			0
#define CRYPTO_AUTH_SIZE_SHA256			1
#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES		0
#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES		1
#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES		2
#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES		3
#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES		4
#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES		5
#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES		6
#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES		7
#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES		8
#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES		9
#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES		10
#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES		11
#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES		12
#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES		13
#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES		14
#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES		15
#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
#define CRYPTO_AUTH_MODE_HASH			0
#define CRYPTO_AUTH_MODE_HMAC			1
#define CRYPTO_AUTH_MODE_CCM			0
#define CRYPTO_AUTH_MODE_CMAC			1
#define CRYPTO_AUTH_KEY_SIZE			3 /* bit 5 .. 3*/
#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
#define CRYPTO_AUTH_KEY_SZ_AES128		0
#define CRYPTO_AUTH_KEY_SZ_AES256		2
#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
#define CRYPTO_AUTH_ALG_MASK			7
#define CRYPTO_AUTH_ALG_NONE			0
#define CRYPTO_AUTH_ALG_SHA			1
#define CRYPTO_AUTH_ALG_AES			2
#define CRYPTO_AUTH_ALG_KASUMI			3
#define CRYPTO_AUTH_ALG_SNOW3G			4
#define CRYPTO_AUTH_ALG_ZUC			5

/* encr_xts_du_size reg */
#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0 */
#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff

/* encr_seg_cfg reg */
#define CRYPTO_F8_KEYSTREAM_ENABLE		17 /* bit */
#define CRYPTO_F8_KEYSTREAM_DISABLED		0
#define CRYPTO_F8_KEYSTREAM_ENABLED		1
#define CRYPTO_F8_DIRECTION			16 /* bit */
#define CRYPTO_F8_DIRECTION_UPLINK		0
#define CRYPTO_F8_DIRECTION_DOWNLINK		1
#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
#define CRYPTO_USE_KEY_REGISTERS		0
#define CRYPTO_USE_HW_KEY_ENCR			14
#define CRYPTO_USE_KEY_REG			0
#define CRYPTO_USE_HW_KEY			1
#define CRYPTO_LAST_CCM				13
#define CRYPTO_LAST_CCM_XFR			1
#define CRYPTO_INTERM_CCM_XFR			0
#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
#define CRYPTO_CNTR_ALG_NIST			0
#define CRYPTO_ENCODE				10
#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
/* only valid when AES */
#define CRYPTO_ENCR_MODE_ECB			0
#define CRYPTO_ENCR_MODE_CBC			1
#define CRYPTO_ENCR_MODE_CTR			2
#define CRYPTO_ENCR_MODE_XTS			3
#define CRYPTO_ENCR_MODE_CCM			4
#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
#define CRYPTO_ENCR_KEY_SZ_DES			0
#define CRYPTO_ENCR_KEY_SZ_3DES			1
#define CRYPTO_ENCR_KEY_SZ_AES128		0
#define CRYPTO_ENCR_KEY_SZ_AES256		2
#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
#define CRYPTO_ENCR_ALG_NONE			0
#define CRYPTO_ENCR_ALG_DES			1
#define CRYPTO_ENCR_ALG_AES			2
#define CRYPTO_ENCR_ALG_KASUMI			4
#define CRYPTO_ENCR_ALG_SNOW_3G			5
#define CRYPTO_ENCR_ALG_ZUC			6

/* goproc reg */
#define CRYPTO_GO				0
#define CRYPTO_CLR_CNTXT			1
#define CRYPTO_RESULTS_DUMP			2

/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16 /* bit 31 - 16 */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0 /* bit 4 - 0 */
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)

/* F9 definition of CRYPTO_AUTH_IV4 REG */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0 /* bit 2 - 0 */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
		(0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)

/* engines_avail */
#define CRYPTO_ENCR_AES_SEL			0
#define CRYPTO_DES_SEL				1
#define CRYPTO_ENCR_SNOW3G_SEL			2
#define CRYPTO_ENCR_KASUMI_SEL			3
#define CRYPTO_SHA_SEL				4
#define CRYPTO_SHA512_SEL			5
#define CRYPTO_AUTH_AES_SEL			6
#define CRYPTO_AUTH_SNOW3G_SEL			7
#define CRYPTO_AUTH_KASUMI_SEL			8
#define CRYPTO_BAM_PIPE_SETS			9 /* bit 12 - 9 */
#define CRYPTO_AXI_WR_BEATS			13 /* bit 18 - 13 */
#define CRYPTO_AXI_RD_BEATS			19 /* bit 24 - 19 */
#define CRYPTO_ENCR_ZUC_SEL			26
#define CRYPTO_AUTH_ZUC_SEL			27
#define CRYPTO_ZUC_ENABLE			28
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */

View File

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
 */
#ifndef __QCOM_CRYPTO_DEVICE__H
#define __QCOM_CRYPTO_DEVICE__H

#include <linux/types.h>

/*
 * Platform-declared crypto-engine capabilities, filled in from the
 * device's platform data / device tree and consumed by the qcedev and
 * qcrypto drivers. All fields are boolean-style flags (0 or nonzero).
 */
struct msm_ce_hw_support {
	uint32_t ce_shared;		/* CE is shared with another execution environment */
	uint32_t shared_ce_resource;	/* NOTE(review): distinct from ce_shared - confirm semantics */
	uint32_t hw_key_support;	/* hardware (fuse-provisioned) key available */
	uint32_t sha_hmac;		/* engine supports SHA HMAC */
};
#endif /* __QCOM_CRYPTO_DEVICE__H */

289
linux/qcedev.h Normal file
View File

@@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 */

/*
 * UAPI for the /dev/qce character device: cipher and hash operation
 * requests and their ioctl numbers. Struct layouts are ABI; do not
 * reorder or resize fields.
 */
#ifndef _QCEDEV__H
#define _QCEDEV__H

#include <linux/types.h>
#include <linux/ioctl.h>
#include "fips_status.h"

#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
#define QCEDEV_MAX_BEARER	31
#define QCEDEV_MAX_KEY_SIZE	64
#define QCEDEV_MAX_IV_SIZE	32

#define QCEDEV_MAX_BUFFERS      16
#define QCEDEV_MAX_SHA_DIGEST	32

#define QCEDEV_USE_PMEM		1
#define QCEDEV_NO_PMEM		0

#define QCEDEV_AES_KEY_128	16
#define QCEDEV_AES_KEY_192	24
#define QCEDEV_AES_KEY_256	32
/**
 *qcedev_oper_enum: Operation types
 * @QCEDEV_OPER_ENC:		Encrypt
 * @QCEDEV_OPER_DEC:		Decrypt
 * @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
 *				user. Key already set by an external processor.
 * @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
 *				user. Key already set by an external processor.
 */
enum qcedev_oper_enum {
	QCEDEV_OPER_DEC		= 0,
	QCEDEV_OPER_ENC		= 1,
	QCEDEV_OPER_DEC_NO_KEY	= 2,
	QCEDEV_OPER_ENC_NO_KEY	= 3,
	QCEDEV_OPER_LAST
};

/**
 *qcedev_cipher_alg_enum: Cipher algorithm types
 * @QCEDEV_ALG_DES:		DES
 * @QCEDEV_ALG_3DES:		3DES
 * @QCEDEV_ALG_AES:		AES
 */
enum qcedev_cipher_alg_enum {
	QCEDEV_ALG_DES		= 0,
	QCEDEV_ALG_3DES		= 1,
	QCEDEV_ALG_AES		= 2,
	QCEDEV_ALG_LAST
};

/**
 *qcedev_cipher_mode_enum : AES mode
 * @QCEDEV_AES_MODE_CBC:	CBC
 * @QCEDEV_AES_MODE_ECB:	ECB
 * @QCEDEV_AES_MODE_CTR:	CTR
 * @QCEDEV_AES_MODE_XTS:	XTS
 * @QCEDEV_AES_MODE_CCM:	CCM
 * @QCEDEV_DES_MODE_CBC:	CBC
 * @QCEDEV_DES_MODE_ECB:	ECB
 */
enum qcedev_cipher_mode_enum {
	QCEDEV_AES_MODE_CBC	= 0,
	QCEDEV_AES_MODE_ECB	= 1,
	QCEDEV_AES_MODE_CTR	= 2,
	QCEDEV_AES_MODE_XTS	= 3,
	QCEDEV_AES_MODE_CCM	= 4,
	QCEDEV_DES_MODE_CBC	= 5,
	QCEDEV_DES_MODE_ECB	= 6,
	QCEDEV_AES_DES_MODE_LAST
};

/**
 *enum qcedev_sha_alg_enum : Secure Hashing Algorithm
 * @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
 * @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
 * @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
 * @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
 * @QCEDEV_ALG_AES_CMAC:	Configurable MAC size
 */
enum qcedev_sha_alg_enum {
	QCEDEV_ALG_SHA1		= 0,
	QCEDEV_ALG_SHA256	= 1,
	QCEDEV_ALG_SHA1_HMAC	= 2,
	QCEDEV_ALG_SHA256_HMAC	= 3,
	QCEDEV_ALG_AES_CMAC	= 4,
	QCEDEV_ALG_SHA_ALG_LAST
};

/**
 * struct buf_info - Buffer information
 * @offset:			Offset from the base address of the buffer
 *				(Used when buffer is allocated using PMEM)
 * @vaddr:			Virtual buffer address pointer
 * @len:			Size of the buffer
 */
struct	buf_info {
	union {
		__u32	offset;
		__u8		*vaddr;
	};
	__u32	len;
};

/**
 * struct qcedev_vbuf_info - Source and destination Buffer information
 * @src:			Array of buf_info for input/source
 * @dst:			Array of buf_info for output/destination
 */
struct	qcedev_vbuf_info {
	struct buf_info	src[QCEDEV_MAX_BUFFERS];
	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
};

/**
 * struct qcedev_pmem_info - Stores PMEM buffer information
 * @fd_src:			Handle to /dev/adsp_pmem used to allocate
 *				memory for input/src buffer
 * @src:			Array of buf_info for input/source
 * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
 *				memory for output/dst buffer
 * @dst:			Array of buf_info for output/destination
 * @pmem_src_offset:		The offset from input/src buffer
 *				(allocated by PMEM)
 */
struct	qcedev_pmem_info {
	int		fd_src;
	struct buf_info	src[QCEDEV_MAX_BUFFERS];
	int		fd_dst;
	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
};

/**
 * struct qcedev_cipher_op_req - Holds the ciphering request information
 * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
 *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
 * @pmem (IN):		Stores PMEM buffer information.
 *			Refer struct qcedev_pmem_info
 * @vbuf (IN/OUT):	Stores Source and destination Buffer information
 *			Refer to struct qcedev_vbuf_info
 * @entries (IN):	Number of entries used in the src/dst buffer arrays
 * @data_len (IN):	Total Length of input/src and output/dst in bytes
 * @in_place_op (IN):	Indicates whether the operation is inplace where
 *			source == destination
 *			When using PMEM allocated memory, must set this to 1
 * @enckey (IN):	128 bits of confidentiality key
 *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
 *			enckey[15] bit 7-0
 * @encklen (IN):	Length of the encryption key(set to 128  bits/16
 *			bytes in the driver)
 * @iv (IN/OUT):	Initialisation vector data
 *			This is updated by the driver, incremented by
 *			number of blocks encrypted/decrypted.
 * @ivlen (IN):		Length of the IV
 * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
 *			for AES-128 CTR mode only)
 * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
 * @mode (IN):		Mode to use when using AES algorithm: ECB/CBC/CTR
 *			Applicable when using AES algorithm only
 * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
 *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
 *
 *If use_pmem is set to 0, the driver assumes that memory was not allocated
 * via PMEM, and kernel will need to allocate memory and copy data from user
 * space buffer (data_src/data_dst) and process accordingly and copy data back
 * to the user space buffer
 *
 * If use_pmem is set to 1, the driver assumes that memory was allocated via
 * PMEM.
 * The kernel driver will use the fd_src to determine the kernel virtual address
 * base that maps to the user space virtual address base for the  buffer
 * allocated in user space.
 * The final input/src and output/dst buffer pointer will be determined
 * by adding the offsets to the kernel virtual addr.
 *
 * If use of hardware key is supported in the target, user can configure the
 * key parameters (encklen, enckey) to use the hardware key.
 * In order to use the hardware key, set encklen to 0 and set the enckey
 * data array to 0.
 */
struct	qcedev_cipher_op_req {
	__u8 use_pmem;
	union {
		struct qcedev_pmem_info	pmem;
		struct qcedev_vbuf_info	vbuf;
	};
	__u32 entries;
	__u32 data_len;
	__u8 in_place_op;
	__u8 enckey[QCEDEV_MAX_KEY_SIZE];
	__u32 encklen;
	__u8 iv[QCEDEV_MAX_IV_SIZE];
	__u32 ivlen;
	__u32 byteoffset;
	enum qcedev_cipher_alg_enum alg;
	enum qcedev_cipher_mode_enum mode;
	enum qcedev_oper_enum op;
};

/**
 * struct qcedev_sha_op_req - Holds the hashing request information
 * @data (IN):			Array of pointers to the data to be hashed
 * @entries (IN):		Number of buf_info entries in the data array
 * @data_len (IN):		Length of data to be hashed
 * @digest (IN/OUT):		Returns the hashed data information
 * @diglen (OUT):		Size of the hashed/digest data
 * @authkey (IN):		Pointer to authentication key for HMAC
 * @authklen (IN):		Size of the authentication key
 * @alg (IN):			Secure Hash algorithm
 */
struct	qcedev_sha_op_req {
	struct buf_info	data[QCEDEV_MAX_BUFFERS];
	__u32 entries;
	__u32 data_len;
	__u8 digest[QCEDEV_MAX_SHA_DIGEST];
	__u32 diglen;
	__u8 *authkey;
	__u32 authklen;
	enum qcedev_sha_alg_enum alg;
};

/**
 * struct qfips_verify_t - Holds data for FIPS Integrity test
 * @kernel_size  (IN):		Size of kernel Image
 * @kernel (IN):		pointer to buffer containing the kernel Image
 */
struct qfips_verify_t {
	unsigned int kernel_size;
	void *kernel;
};

/**
 * struct qcedev_map_buf_req - Holds the mapping request information
 * fd (IN):            Array of fds.
 * num_fds (IN):       Number of fds in fd[].
 * fd_size (IN):       Array of sizes corresponding to each fd in fd[].
 * fd_offset (IN):     Array of offset corresponding to each fd in fd[].
 * vaddr (OUT):        Array of mapped virtual address corresponding to
 *			each fd in fd[].
 */
struct qcedev_map_buf_req {
	__s32         fd[QCEDEV_MAX_BUFFERS];
	__u32        num_fds;
	__u32        fd_size[QCEDEV_MAX_BUFFERS];
	__u32        fd_offset[QCEDEV_MAX_BUFFERS];
	__u64        buf_vaddr[QCEDEV_MAX_BUFFERS];
};

/**
 * struct qcedev_unmap_buf_req - Holds the unmapping request information
 * fd (IN):            Array of fds to unmap
 * num_fds (IN):       Number of fds in fd[].
 */
struct  qcedev_unmap_buf_req {
	__s32         fd[QCEDEV_MAX_BUFFERS];
	__u32        num_fds;
};

struct file;

/* ioctl interface; all requests operate on an open fd of /dev/qce. */
#define QCEDEV_IOC_MAGIC	0x87

#define QCEDEV_IOCTL_ENC_REQ		\
	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_DEC_REQ		\
	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_SHA_INIT_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_GET_SHA_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_LOCK_CE	\
	_IO(QCEDEV_IOC_MAGIC, 7)
#define QCEDEV_IOCTL_UNLOCK_CE	\
	_IO(QCEDEV_IOC_MAGIC, 8)
#define QCEDEV_IOCTL_GET_CMAC_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_MAP_BUF_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
#define QCEDEV_IOCTL_UNMAP_BUF_REQ	\
	_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
#endif /* _QCEDEV__H */

60
linux/qcrypto.h Normal file
View File

@@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 */
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_

#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>

/* Per-request flag bits carried in the qcrypto context. */
#define QCRYPTO_CTX_KEY_MASK		0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY		0x00000001	/* use fuse-provisioned HW key */
#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002	/* use per-pipe key registers */

#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100	/* XTS data-unit size 512 B */
#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200	/* XTS data-unit size 1 KiB */

/* Pin a request to a specific crypto-engine device instance. */
int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev);
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);

/* Set/clear the QCRYPTO_CTX_* flags on a request. */
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);

int qcrypto_cipher_clear_flag(struct skcipher_request *req,
							unsigned int flags);
int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);

/* Descriptor for one hardware crypto-engine instance. */
struct crypto_engine_entry {
	u32 hw_instance;	/* hardware instance number */
	u32 ce_device;		/* CE device id */
	int shared;		/* nonzero if the engine is shared */
};

/* Enumerate available engines; arr must hold num_engines entries. */
int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
				struct crypto_engine_entry *arr);
int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
				unsigned int fde_pfe,
				unsigned int hw_inst);

/* Function table allowing callers to bind to this API indirectly. */
struct qcrypto_func_set {
	int (*cipher_set)(struct skcipher_request *req,
			unsigned int fde_pfe,
			unsigned int hw_inst);
	int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
	int (*get_num_engines)(void);
	void (*get_engine_list)(size_t num_engines,
				struct crypto_engine_entry *arr);
};
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H */

95
linux/smcinvoke.h Normal file
View File

@@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */

/*
 * UAPI for the smcinvoke driver: userspace <-> TEE object invocation.
 * Struct layouts are ABI; do not reorder or resize fields.
 */
#ifndef _UAPI_SMCINVOKE_H_
#define _UAPI_SMCINVOKE_H_

#include <linux/types.h>
#include <linux/ioctl.h>

#define SMCINVOKE_USERSPACE_OBJ_NULL	-1

/* A memory argument: userspace address and size. */
struct smcinvoke_buf {
	__u64 addr;
	__u64 size;
};

/* An object argument: fd of the object, plus its callback server fd. */
struct smcinvoke_obj {
	__s64 fd;
	__s32 cb_server_fd;
	__s32 reserved;		/* must be 0 */
};

/* One invocation argument: either a buffer or an object. */
union smcinvoke_arg {
	struct smcinvoke_buf b;
	struct smcinvoke_obj o;
};

/*
 * struct smcinvoke_cmd_req: This structure is transparently sent to TEE
 * @op - Operation to be performed
 * @counts - number of arguments passed
 * @result - result of invoke operation
 * @argsize - size of each of arguments
 * @args - args is pointer to buffer having all arguments
 */
struct smcinvoke_cmd_req {
	__u32 op;
	__u32 counts;
	__s32 result;
	__u32 argsize;
	__u64 args;
};

/*
 * struct smcinvoke_accept: structure to process CB req from TEE
 * @has_resp: IN: Whether IOCTL is carrying response data
 * @txn_id: OUT: An id that should be passed as it is for response
 * @result: IN: Outcome of operation op
 * @cbobj_id: OUT: Callback object which is target of operation op
 * @op: OUT: Operation to be performed on target object
 * @counts: OUT: Number of arguments, embedded in buffer pointed by
 *               buf_addr, to complete operation
 * @reserved: IN/OUT: Usage is not defined but should be set to 0.
 * @argsize: IN: Size of any argument, all of equal size, embedded
 *               in buffer pointed by buf_addr
 * @buf_len: IN: Len of buffer pointed by buf_addr
 * @buf_addr: IN: Buffer containing all arguments which are needed
 *                to complete operation op
 */
struct smcinvoke_accept {
	__u32 has_resp;
	__u32 txn_id;
	__s32 result;
	__s32 cbobj_id;
	__u32 op;
	__u32 counts;
	__s32 reserved;
	__u32 argsize;
	__u64 buf_len;
	__u64 buf_addr;
};

/*
 * @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
 */
struct smcinvoke_server {
	__u32 cb_buf_size;
};

#define SMCINVOKE_IOC_MAGIC    0x98

#define SMCINVOKE_IOCTL_INVOKE_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
#define SMCINVOKE_IOCTL_ACCEPT_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
#define SMCINVOKE_IOCTL_SERVER_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s32)
#endif /* _UAPI_SMCINVOKE_H_ */

View File

@@ -0,0 +1,8 @@
# Build SSG kernel drivers: add the DLKMs produced by this tree to the
# product package list so they are installed on the vendor image.
# Note: no trailing backslash on the final entry — a dangling continuation
# would silently swallow the next line when makefiles are concatenated.
PRODUCT_PACKAGES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \
                    $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \
                    $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \
                    $(KERNEL_MODULES_OUT)/qce50_dlkm.ko \
                    $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko

View File

@@ -0,0 +1,5 @@
# Register the SSG DLKMs with the board's vendor kernel-module install
# list (loaded by userspace init rather than the kernel).
# Note: no trailing backslash on the final entry — a dangling continuation
# would silently swallow the next line when makefiles are concatenated.
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko \
                               $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko \
                               $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \
                               $(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \
                               $(KERNEL_MODULES_OUT)/qce50_dlkm.ko

91
smcinvoke/IClientEnv.h Normal file
View File

@@ -0,0 +1,91 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
*/
/* Operation IDs understood by the remote IClientEnv interface. */
#define IClientEnv_OP_open 0
#define IClientEnv_OP_registerLegacy 1
#define IClientEnv_OP_register 2
#define IClientEnv_OP_registerWithWhitelist 3
/* Drop one reference on @self (thin wrapper over Object_OP_release). */
static inline int32_t
IClientEnv_release(struct Object self)
{
	return Object_invoke(self, Object_OP_release, 0, 0);
}
/* Take one reference on @self (thin wrapper over Object_OP_retain). */
static inline int32_t
IClientEnv_retain(struct Object self)
{
	return Object_invoke(self, Object_OP_retain, 0, 0);
}
/* Open the service identified by @uid_val; on success *obj_ptr is the new object. */
static inline int32_t
IClientEnv_open(struct Object self, uint32_t uid_val, struct Object *obj_ptr)
{
	int32_t rc;
	union ObjectArg args[2];

	/* one input buffer (the uid), one output object */
	args[0].b.ptr = &uid_val;
	args[0].b.size = sizeof(uint32_t);
	rc = Object_invoke(self, IClientEnv_OP_open, args,
			ObjectCounts_pack(1, 0, 0, 1));
	*obj_ptr = args[1].o;
	return rc;
}
/* Register CBOR-encoded @credentials (legacy buffer form); returns a client env. */
static inline int32_t
IClientEnv_registerLegacy(struct Object self, const void *credentials_ptr, size_t credentials_len,
		struct Object *clientEnv_ptr)
{
	int32_t rc;
	union ObjectArg args[2];

	args[0].bi.ptr = credentials_ptr;
	args[0].bi.size = credentials_len;
	rc = Object_invoke(self, IClientEnv_OP_registerLegacy, args,
			ObjectCounts_pack(1, 0, 0, 1));
	*clientEnv_ptr = args[1].o;
	return rc;
}
/* Register a credentials object; on success *clientEnv_ptr is the client env. */
static inline int32_t
IClientEnv_register(struct Object self, struct Object credentials_val,
		struct Object *clientEnv_ptr)
{
	int32_t rc;
	union ObjectArg args[2];

	args[0].o = credentials_val;
	rc = Object_invoke(self, IClientEnv_OP_register, args,
			ObjectCounts_pack(0, 0, 1, 1));
	*clientEnv_ptr = args[1].o;
	return rc;
}
/*
 * Register a credentials object together with a whitelist of @uids_len
 * allowed UIDs; on success *clientEnv_ptr is the client env object.
 */
static inline int32_t
IClientEnv_registerWithWhitelist(struct Object self,
		struct Object credentials_val, const uint32_t *uids_ptr,
		size_t uids_len, struct Object *clientEnv_ptr)
{
	int32_t rc;
	union ObjectArg args[3];

	/* layout: [0] uid buffer (BI), [1] credentials (OI), [2] out object (OO) */
	args[0].bi.ptr = uids_ptr;
	args[0].bi.size = uids_len * sizeof(uint32_t);
	args[1].o = credentials_val;
	rc = Object_invoke(self, IClientEnv_OP_registerWithWhitelist, args,
			ObjectCounts_pack(1, 0, 1, 1));
	*clientEnv_ptr = args[2].o;
	return rc;
}

View File

@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
*/
#include "smcinvoke_object.h"
/* Error and operation IDs for the remote IQSEEComCompat interface. */
#define IQSEEComCompat_ERROR_APP_UNAVAILABLE INT32_C(10)
#define IQSEEComCompat_OP_sendRequest 0
#define IQSEEComCompat_OP_disconnect 1
#define IQSEEComCompat_OP_unload 2
/* Drop one reference on @self. */
static inline int32_t
IQSEEComCompat_release(struct Object self)
{
	return Object_invoke(self, Object_OP_release, 0, 0);
}
/* Take one reference on @self. */
static inline int32_t
IQSEEComCompat_retain(struct Object self)
{
	return Object_invoke(self, Object_OP_retain, 0, 0);
}
/*
 * Forward a QSEECom request/response pair to the TA.
 * Marshals 4 input buffers, 2 in/out buffers and 4 input objects; on
 * return *reqOut_lenout / *rspOut_lenout hold the sizes written back.
 */
static inline int32_t
IQSEEComCompat_sendRequest(struct Object self,
		const void *reqIn_ptr, size_t reqIn_len,
		const void *rspIn_ptr, size_t rspIn_len,
		void *reqOut_ptr, size_t reqOut_len, size_t *reqOut_lenout,
		void *rspOut_ptr, size_t rspOut_len, size_t *rspOut_lenout,
		const uint32_t *embeddedBufOffsets_ptr,
		size_t embeddedBufOffsets_len, uint32_t is64_val,
		struct Object smo1_val, struct Object smo2_val,
		struct Object smo3_val, struct Object smo4_val)
{
	int32_t rc;
	union ObjectArg args[10];

	/* input buffers (BI): indices 0..3 */
	args[0].bi.ptr = reqIn_ptr;
	args[0].bi.size = reqIn_len;
	args[1].bi.ptr = rspIn_ptr;
	args[1].bi.size = rspIn_len;
	args[2].bi.ptr = embeddedBufOffsets_ptr;
	args[2].bi.size = embeddedBufOffsets_len * sizeof(uint32_t);
	args[3].b.ptr = &is64_val;
	args[3].b.size = sizeof(uint32_t);
	/* in/out buffers (BO): indices 4..5 */
	args[4].b.ptr = reqOut_ptr;
	args[4].b.size = reqOut_len;
	args[5].b.ptr = rspOut_ptr;
	args[5].b.size = rspOut_len;
	/* input objects (OI): indices 6..9 */
	args[6].o = smo1_val;
	args[7].o = smo2_val;
	args[8].o = smo3_val;
	args[9].o = smo4_val;

	rc = Object_invoke(self, IQSEEComCompat_OP_sendRequest, args,
			ObjectCounts_pack(4, 2, 4, 0));

	*reqOut_lenout = args[4].b.size;
	*rspOut_lenout = args[5].b.size;
	return rc;
}
/* Ask the TA side to disconnect this compat session. */
static inline int32_t
IQSEEComCompat_disconnect(struct Object self)
{
	return Object_invoke(self, IQSEEComCompat_OP_disconnect, 0, 0);
}
/* Ask the TA side to unload the application behind this object. */
static inline int32_t
IQSEEComCompat_unload(struct Object self)
{
	return Object_invoke(self, IQSEEComCompat_OP_unload, 0, 0);
}

View File

@@ -0,0 +1,99 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
*/
#include "smcinvoke_object.h"
/* Error and operation IDs for the remote IQSEEComCompatAppLoader interface. */
#define IQSEEComCompatAppLoader_ERROR_INVALID_BUFFER INT32_C(10)
#define IQSEEComCompatAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
#define IQSEEComCompatAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
#define IQSEEComCompatAppLoader_ERROR_METADATA_INVALID INT32_C(13)
#define IQSEEComCompatAppLoader_ERROR_MAX_NUM_APPS INT32_C(14)
#define IQSEEComCompatAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15)
#define IQSEEComCompatAppLoader_ERROR_ALREADY_LOADED INT32_C(16)
#define IQSEEComCompatAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
#define IQSEEComCompatAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
#define IQSEEComCompatAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
#define IQSEEComCompatAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
#define IQSEEComCompatAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(22)
#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(23)
#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(24)
#define IQSEEComCompatAppLoader_OP_loadFromRegion 0
#define IQSEEComCompatAppLoader_OP_loadFromBuffer 1
#define IQSEEComCompatAppLoader_OP_lookupTA 2
/* Drop one reference on @self. */
static inline int32_t
IQSEEComCompatAppLoader_release(struct Object self)
{
	return Object_invoke(self, Object_OP_release, 0, 0);
}
/* Take one reference on @self. */
static inline int32_t
IQSEEComCompatAppLoader_retain(struct Object self)
{
	return Object_invoke(self, Object_OP_retain, 0, 0);
}
/* Load a TA whose ELF image already resides in memory-region object @appElf_val. */
static inline int32_t
IQSEEComCompatAppLoader_loadFromRegion(struct Object self,
		struct Object appElf_val, const void *filename_ptr,
		size_t filename_len, struct Object *appCompat_ptr)
{
	int32_t rc;
	union ObjectArg args[3];

	args[0].bi.ptr = filename_ptr;
	args[0].bi.size = filename_len;
	args[1].o = appElf_val;
	rc = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromRegion,
			args, ObjectCounts_pack(1, 0, 1, 1));
	*appCompat_ptr = args[2].o;
	return rc;
}
/*
 * Load a TA from an in-memory ELF image. On success the distribution name
 * (and its length) are written back and *appCompat_ptr is the app object.
 */
static inline int32_t
IQSEEComCompatAppLoader_loadFromBuffer(struct Object self,
		const void *appElf_ptr, size_t appElf_len,
		const void *filename_ptr, size_t filename_len,
		void *distName_ptr, size_t distName_len,
		size_t *distName_lenout, struct Object *appCompat_ptr)
{
	int32_t rc;
	union ObjectArg args[4];

	args[0].bi.ptr = appElf_ptr;
	args[0].bi.size = appElf_len;
	args[1].bi.ptr = filename_ptr;
	args[1].bi.size = filename_len;
	args[2].b.ptr = distName_ptr;
	args[2].b.size = distName_len;
	rc = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromBuffer,
			args, ObjectCounts_pack(2, 1, 0, 1));
	*distName_lenout = args[2].b.size;
	*appCompat_ptr = args[3].o;
	return rc;
}
/* Look up an already-loaded TA by name; returns its app object on success. */
static inline int32_t
IQSEEComCompatAppLoader_lookupTA(struct Object self, const void *appName_ptr,
		size_t appName_len, struct Object *appCompat_ptr)
{
	int32_t rc;
	union ObjectArg args[2];

	args[0].bi.ptr = appName_ptr;
	args[0].bi.size = appName_len;
	rc = Object_invoke(self, IQSEEComCompatAppLoader_OP_lookupTA,
			args, ObjectCounts_pack(1, 0, 0, 1));
	*appCompat_ptr = args[1].o;
	return rc;
}

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __QSEECOM_KERNEL_H_
#define __QSEECOM_KERNEL_H_
#include <linux/types.h>
#define QSEECOM_ALIGN_SIZE	0x40
#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
/*
 * Round @x up to the next QSEECOM_ALIGN_SIZE boundary.
 * The argument is parenthesized so compound expressions such as
 * QSEECOM_ALIGN(a ? b : c) expand with the intended precedence.
 */
#define QSEECOM_ALIGN(x)	\
	(((x) + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
/*
 * struct qseecom_handle -
 * Handle to the qseecom device for kernel clients
 * @dev - opaque device context
 * @sbuf - shared buffer pointer
 * @sbuf_len - shared buffer size
 */
struct qseecom_handle {
	void *dev; /* in/out */
	unsigned char *sbuf; /* in/out: shared request/response buffer */
	uint32_t sbuf_len; /* in/out: size of @sbuf in bytes */
};
/* Kernel-client API implemented by the qseecom (or compat) driver. */
int qseecom_start_app(struct qseecom_handle **handle,
		char *app_name, uint32_t size);
int qseecom_shutdown_app(struct qseecom_handle **handle);
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
		uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
#if IS_ENABLED(CONFIG_QSEECOM)
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
		u64 *response_type, unsigned int *data);
#else
/* Stub when the qseecom driver is not built in: report "not supported". */
static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
		u64 *response_type, unsigned int *data)
{
	return -EOPNOTSUPP;
}
#endif
#endif /* __QSEECOM_KERNEL_H_ */

2449
smcinvoke/smcinvoke.c Normal file

File diff suppressed because it is too large Load Diff

103
smcinvoke/smcinvoke.h Normal file
View File

@@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_SMCINVOKE_H_
#define _UAPI_SMCINVOKE_H_
#include <linux/types.h>
#include <linux/ioctl.h>
/* fd value denoting a NULL object in the userspace ABI */
#define SMCINVOKE_USERSPACE_OBJ_NULL -1

/* Buffer argument as seen by the driver. */
struct smcinvoke_buf {
	__u64 addr;	/* userspace address of the buffer */
	__u64 size;	/* buffer size in bytes */
};

/* Object argument, identified by its fd. */
struct smcinvoke_obj {
	__s64 fd;		/* object fd, or SMCINVOKE_USERSPACE_OBJ_NULL */
	__s32 cb_server_fd;	/* callback server fd, if any */
	__s32 reserved;		/* must be 0 */
};

/* One invoke argument: either a buffer or an object. */
union smcinvoke_arg {
	struct smcinvoke_buf b;
	struct smcinvoke_obj o;
};
/*
* struct smcinvoke_cmd_req: This structure is transparently sent to TEE
* @op - Operation to be performed
 * @counts - number of arguments passed
* @result - result of invoke operation
* @argsize - size of each of arguments
* @args - args is pointer to buffer having all arguments
*/
struct smcinvoke_cmd_req {
	__u32 op;	/* operation to be performed */
	__u32 counts;	/* packed count of arguments */
	__s32 result;	/* result of the invoke operation */
	__u32 argsize;	/* size of each argument element */
	__u64 args;	/* userspace pointer to the argument array */
};
/*
* struct smcinvoke_accept: structure to process CB req from TEE
* @has_resp: IN: Whether IOCTL is carrying response data
* @txn_id: OUT: An id that should be passed as it is for response
* @result: IN: Outcome of operation op
* @cbobj_id: OUT: Callback object which is target of operation op
* @op: OUT: Operation to be performed on target object
* @counts: OUT: Number of arguments, embedded in buffer pointed by
* buf_addr, to complete operation
* @reserved: IN/OUT: Usage is not defined but should be set to 0.
* @argsize: IN: Size of any argument, all of equal size, embedded
* in buffer pointed by buf_addr
* @buf_len: IN: Len of buffer pointed by buf_addr
* @buf_addr: IN: Buffer containing all arguments which are needed
* to complete operation op
*/
struct smcinvoke_accept {
	__u32 has_resp;		/* IN: non-zero when this ioctl carries a response */
	__u32 txn_id;		/* OUT: transaction id, echoed back unchanged */
	__s32 result;		/* IN: outcome of the completed operation */
	__s32 cbobj_id;		/* OUT: callback object targeted by @op */
	__u32 op;		/* OUT: operation to perform on the target object */
	__u32 counts;		/* OUT: number of arguments packed in @buf_addr */
	__s32 reserved;		/* IN/OUT: undefined usage; must be set to 0 */
	__u32 argsize;		/* IN: size of each (equal-sized) argument */
	__u64 buf_len;		/* IN: length of the buffer at @buf_addr */
	__u64 buf_addr;		/* IN: buffer holding all arguments for @op */
};
/*
* @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
*/
struct smcinvoke_server {
	__u32 cb_buf_size;	/* IN: max callback buffer size for this server */
};
/* ioctl interface for /dev/smcinvoke. */
#define SMCINVOKE_IOC_MAGIC 0x98
#define SMCINVOKE_IOCTL_INVOKE_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
#define SMCINVOKE_IOCTL_ACCEPT_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
#define SMCINVOKE_IOCTL_SERVER_REQ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
	_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s32)
/*
 * smcinvoke logging buffer is for communicating with the smcinvoke driver additional
 * info for debugging to be included in driver's log (if any)
 */
#define SMCINVOKE_LOG_BUF_SIZE 100
#define SMCINVOKE_IOCTL_LOG \
	_IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE)
#endif /* _UAPI_SMCINVOKE_H_ */

View File

@@ -0,0 +1,479 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#if !IS_ENABLED(CONFIG_QSEECOM)
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/elf.h>
#include "smcinvoke.h"
#include "linux/qseecom.h"
#include "smcinvoke_object.h"
#include "misc/qseecom_kernel.h"
#include "IQSEEComCompat.h"
#include "IQSEEComCompatAppLoader.h"
#include "IClientEnv.h"
/* UID used to open the QSEECom-compat app loader service via IClientEnv. */
const uint32_t CQSEEComCompatAppLoader_UID = 122;

/*
 * Per-client state backing a struct qseecom_handle.
 * NOTE(review): the first three fields mirror struct qseecom_handle's
 * layout, so the context is cast to/from the handle — keep them in sync.
 */
struct qseecom_compat_context {
	void *dev; /* in/out */
	unsigned char *sbuf; /* in/out */
	uint32_t sbuf_len; /* in/out */
	struct qtee_shm shm;		/* shm bridge allocation backing @sbuf */
	uint8_t app_arch;		/* ELF class byte of the loaded TA image */
	struct Object client_env;	/* IClientEnv connection */
	struct Object app_loader;	/* IQSEEComCompatAppLoader object */
	struct Object app_controller;	/* IQSEEComCompat object for the TA */
};

/* Context of an object backed by an smcinvoke driver fd. */
struct tzobject_context {
	int fd;			/* driver fd identifying the remote object */
	struct kref refs;	/* refcount; released via tzobject_delete() */
};

static int invoke_over_smcinvoke(void *cxt,
		uint32_t op,
		union ObjectArg *args,
		uint32_t counts);
static struct Object tzobject_new(int fd)
{
struct tzobject_context *me =
kzalloc(sizeof(struct tzobject_context), GFP_KERNEL);
if (!me)
return Object_NULL;
kref_init(&me->refs);
me->fd = fd;
pr_debug("%s: me->fd = %d, me->refs = %u\n", __func__,
me->fd, kref_read(&me->refs));
return (struct Object) { invoke_over_smcinvoke, me };
}
/*
 * kref release callback: tear down a tzobject_context once the last
 * reference is dropped.
 */
static void tzobject_delete(struct kref *refs)
{
	struct tzobject_context *me = container_of(refs,
			struct tzobject_context, refs);

	/* kref_read() returns unsigned int, so log it with %u (was %d) */
	pr_info("%s: me->fd = %d, me->refs = %u, files = %p\n",
		__func__, me->fd, kref_read(&me->refs), current->files);
	/*
	 * after _close_fd(), ref_cnt will be 0,
	 * but smcinvoke_release() was still not called,
	 * so we first call smcinvoke_release_from_kernel_client() to
	 * free filp and ask TZ to release object, then call _close_fd()
	 */
	smcinvoke_release_from_kernel_client(me->fd);
	close_fd(me->fd);
	kfree(me);
}
/*
 * Translate a driver handle into a struct Object.
 * SMCINVOKE_USERSPACE_OBJ_NULL maps to the NULL object; callback objects
 * (handles below it) are not supported here.
 */
int getObjectFromHandle(int handle, struct Object *obj)
{
	if (handle < SMCINVOKE_USERSPACE_OBJ_NULL) {
		/* callback object: unsupported in the kernel-client path */
		pr_err("CBobj not supported for handle %d\n", handle);
		return OBJECT_ERROR_BADOBJ;
	}

	if (handle == SMCINVOKE_USERSPACE_OBJ_NULL) {
		/* NULL object*/
		Object_ASSIGN_NULL(*obj);
		return 0;
	}

	*obj = tzobject_new(handle);
	return Object_isNull(*obj) ? OBJECT_ERROR_BADOBJ : 0;
}
/*
 * Translate a struct Object back into its driver handle.
 * Only the NULL object and objects created by this transport
 * (invoke == invoke_over_smcinvoke) are supported.
 *
 * Fix: the original logged "ret = %d" with a ret that had not been set
 * yet (always 0); log the error code actually returned instead.
 */
int getHandleFromObject(struct Object obj, int *handle)
{
	struct tzobject_context *ctx;

	if (Object_isNull(obj)) {
		/* set NULL Object's fd to be -1 */
		*handle = SMCINVOKE_USERSPACE_OBJ_NULL;
		return 0;
	}

	if (obj.invoke != invoke_over_smcinvoke) {
		pr_err("CBobj not supported\n");
		return OBJECT_ERROR_BADOBJ;
	}

	ctx = (struct tzobject_context *)(obj.context);
	if (ctx == NULL) {
		pr_err("Failed to get tzobject_context obj handle, ret = %d\n",
				OBJECT_ERROR_BADOBJ);
		return OBJECT_ERROR_BADOBJ;
	}

	*handle = ctx->fd;
	return 0;
}
/*
 * Fill a smcinvoke_cmd_req and its driver-side argument array from
 * ObjectArg form: buffers pass through by address/size, input objects are
 * converted to fds, and output-object slots are pre-set to the NULL fd so
 * the driver can fill them in.
 */
static int marshalIn(struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *argptr,
		uint32_t op, union ObjectArg *args,
		uint32_t counts)
{
	size_t i = 0;

	req->op = op;
	req->counts = counts;
	req->argsize = sizeof(union smcinvoke_arg);
	req->args = (uintptr_t)argptr;

	/* buffer args (BI + BO): pass address and size straight through */
	FOR_ARGS(i, counts, buffers) {
		argptr[i].b.addr = (uintptr_t) args[i].b.ptr;
		argptr[i].b.size = args[i].b.size;
	}
	/* input objects: translate each object to its driver fd */
	FOR_ARGS(i, counts, OI) {
		int handle = -1, ret;

		ret = getHandleFromObject(args[i].o, &handle);
		if (ret) {
			pr_err("invalid OI[%zu]\n", i);
			return OBJECT_ERROR_BADOBJ;
		}
		argptr[i].o.fd = handle;
	}
	/* output objects: mark NULL; the driver writes real fds back */
	FOR_ARGS(i, counts, OO) {
		argptr[i].o.fd = SMCINVOKE_USERSPACE_OBJ_NULL;
	}
	return OBJECT_OK;
}
/*
 * Copy results of a completed invoke back into ObjectArg form: updated
 * output-buffer sizes and output objects wrapped from the returned fds.
 * If wrapping any output object fails, all output objects populated so
 * far are released (reset to NULL) and an error is returned.
 */
static int marshalOut(struct smcinvoke_cmd_req *req,
		union smcinvoke_arg *argptr,
		union ObjectArg *args, uint32_t counts,
		struct tzobject_context *me)
{
	int ret = req->result;
	bool failed = false;
	size_t i = 0;

	argptr = (union smcinvoke_arg *)(uintptr_t)(req->args);
	/* output buffers: propagate the sizes written by the driver */
	FOR_ARGS(i, counts, BO) {
		args[i].b.size = argptr[i].b.size;
	}
	/* output objects: wrap each returned fd in a struct Object */
	FOR_ARGS(i, counts, OO) {
		ret = getObjectFromHandle(argptr[i].o.fd, &(args[i].o));
		if (ret) {
			pr_err("Failed to get OO[%zu] from handle = %d\n",
					i, (int)argptr[i].o.fd);
			failed = true;
			break;
		}
		pr_debug("Succeed to create OO for args[%zu].o, fd = %d\n",
				i, (int)argptr[i].o.fd);
	}
	if (failed) {
		/* unwind: release every output object created before the failure */
		FOR_ARGS(i, counts, OO) {
			Object_ASSIGN_NULL(args[i].o);
		}
		/* Only overwrite ret value if invoke result is 0 */
		if (ret == 0)
			ret = OBJECT_ERROR_BADOBJ;
	}
	return ret;
}
/*
 * Transport function for TZ objects: dispatch @op with @args on the
 * object identified by cxt->fd via the smcinvoke driver.
 * Local retain/release ops only touch the kref; remote ops are marshaled,
 * sent to the driver, and their results marshaled back. On a failed
 * invoke any fds the driver returned for output objects are closed.
 */
static int invoke_over_smcinvoke(void *cxt,
		uint32_t op,
		union ObjectArg *args,
		uint32_t counts)
{
	int ret = OBJECT_OK;
	struct smcinvoke_cmd_req req = {0, 0, 0, 0, 0};
	size_t i = 0;
	struct tzobject_context *me = NULL;
	uint32_t method;
	union smcinvoke_arg *argptr = NULL;

	/* start with all output objects NULL so callers never see garbage */
	FOR_ARGS(i, counts, OO) {
		args[i].o = Object_NULL;
	}
	me = (struct tzobject_context *)cxt;
	method = ObjectOp_methodID(op);
	pr_debug("%s: cxt = %p, fd = %d, op = %u, cnt = %x, refs = %u\n",
		__func__, me, me->fd, op, counts, kref_read(&me->refs));
	/* retain/release are handled locally against the kref */
	if (ObjectOp_isLocal(op)) {
		switch (method) {
		case Object_OP_retain:
			kref_get(&me->refs);
			return OBJECT_OK;
		case Object_OP_release:
			kref_put(&me->refs, tzobject_delete);
			return OBJECT_OK;
		}
		return OBJECT_ERROR_REMOTE;
	}
	argptr = kcalloc(OBJECT_COUNTS_TOTAL(counts),
			sizeof(union smcinvoke_arg), GFP_KERNEL);
	if (argptr == NULL)
		return OBJECT_ERROR_KMEM;
	ret = marshalIn(&req, argptr, op, args, counts);
	if (ret)
		goto exit;
	ret = process_invoke_request_from_kernel_client(me->fd, &req);
	if (ret) {
		pr_err("INVOKE failed with ret = %d, result = %d\n"
			"obj.context = %p, fd = %d, op = %d, counts = 0x%x\n",
			ret, req.result, me, me->fd, op, counts);
		/* close any output-object fds the driver handed back */
		FOR_ARGS(i, counts, OO) {
			struct smcinvoke_obj obj = argptr[i].o;

			if (obj.fd >= 0) {
				pr_err("Close OO[%zu].fd = %d\n", i, obj.fd);
				close_fd(obj.fd);
			}
		}
		ret = OBJECT_ERROR_KMEM;
		goto exit;
	}
	if (!req.result)
		ret = marshalOut(&req, argptr, args, counts, me);
exit:
	kfree(argptr);
	/* combine transport error and remote result into one code */
	return ret | req.result;
}
/*
 * Obtain the TZ root object: ask the smcinvoke driver for the root fd and
 * wrap it. If wrapping fails the fd is closed and -ENOMEM is returned.
 *
 * Fix: the original pr_err() used "ret = %d" but passed no argument,
 * printing garbage from the varargs area.
 */
static int get_root_obj(struct Object *rootObj)
{
	int ret = 0;
	int root_fd = -1;

	ret = get_root_fd(&root_fd);
	if (ret) {
		pr_err("Failed to get root fd, ret = %d\n", ret);
		return ret;
	}
	*rootObj = tzobject_new(root_fd);
	if (Object_isNull(*rootObj)) {
		close_fd(root_fd);
		ret = -ENOMEM;
	}
	return ret;
}
/*
 * Get a client environment using CBOR encoded credentials
 * with UID of SYSTEM_UID (1000)
 */
static int32_t get_client_env_object(struct Object *clientEnvObj)
{
	int32_t ret = OBJECT_ERROR;
	struct Object rootObj = Object_NULL;
	/* Hardcode self cred buffer in CBOR encoded format.
	 * CBOR encoded credentials is created using following parameters,
	 * #define ATTR_UID 1
	 * #define ATTR_PKG_NAME 3
	 * #define SYSTEM_UID 1000
	 * static const uint8_t bufString[] = {"UefiSmcInvoke"};
	 */
	uint8_t encodedBuf[] = {0xA2, 0x01, 0x19, 0x03, 0xE8, 0x03, 0x6E, 0x55,
				0x65, 0x66, 0x69, 0x53, 0x6D, 0x63, 0x49, 0x6E,
				0x76, 0x6F, 0x6B, 0x65, 0x0};

	/* get rootObj */
	ret = get_root_obj(&rootObj);
	if (ret) {
		pr_err("Failed to create rootobj\n");
		return ret;
	}

	/* get client env */
	ret = IClientEnv_registerLegacy(rootObj, encodedBuf,
			sizeof(encodedBuf), clientEnvObj);
	if (ret)
		pr_err("Failed to get ClientEnvObject, ret = %d\n", ret);
	/* root object is no longer needed once registration has been attempted */
	Object_release(rootObj);
	return ret;
}
/*
 * Ensure TA @app_name is available: if the loader already knows it
 * (lookupTA succeeds) reuse it; otherwise fetch the firmware image and
 * load it from a buffer. On success cxt->app_controller is set and
 * cxt->app_arch holds the image's ELF class byte.
 */
static int load_app(struct qseecom_compat_context *cxt, const char *app_name)
{
	size_t fw_size = 0;
	u8 *imgbuf_va = NULL;
	int ret = 0;
	char dist_name[MAX_APP_NAME_SIZE] = {0};
	size_t dist_name_len = 0;
	struct qtee_shm shm = {0};

	/* app_name must be NUL-terminated within MAX_APP_NAME_SIZE */
	if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
		pr_err("The app_name (%s) with length %zu is not valid\n",
			app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
		return -EINVAL;
	}

	ret = IQSEEComCompatAppLoader_lookupTA(cxt->app_loader,
			app_name, strlen(app_name), &cxt->app_controller);
	if (!ret) {
		pr_info("app %s exists\n", app_name);
		return ret;
	}

	imgbuf_va = firmware_request_from_smcinvoke(app_name, &fw_size, &shm);
	if (imgbuf_va == NULL) {
		pr_err("Failed on firmware_request_from_smcinvoke\n");
		return -EINVAL;
	}

	ret = IQSEEComCompatAppLoader_loadFromBuffer(
			cxt->app_loader, imgbuf_va, fw_size,
			app_name, strlen(app_name),
			dist_name, MAX_APP_NAME_SIZE, &dist_name_len,
			&cxt->app_controller);
	if (ret) {
		pr_err("loadFromBuffer failed for app %s, ret = %d\n",
			app_name, ret);
		goto exit_release_shm;
	}
	/* remember the image's ELF class (32/64-bit) for later requests */
	cxt->app_arch = *(uint8_t *)(imgbuf_va + EI_CLASS);

	pr_info("%s %d, loaded app %s, dist_name %s, dist_name_len %zu\n",
		__func__, __LINE__, app_name, dist_name, dist_name_len);

exit_release_shm:
	qtee_shmbridge_free_shm(&shm);
	return ret;
}
/*
 * Load TA @app_name and return a handle for sending commands to it.
 * Builds the client env and app loader objects, loads the app, and
 * allocates a @size-byte shared request/response buffer.
 *
 * Fix: @size is uint32_t, so it must be logged with %u — the original
 * used %zu, which mis-decodes the varargs on 32-bit kernels.
 */
int qseecom_start_app(struct qseecom_handle **handle,
		char *app_name, uint32_t size)
{
	int ret = 0;
	struct qseecom_compat_context *cxt = NULL;

	pr_warn("%s, start app %s, size %u\n",
		__func__, app_name, size);
	if (app_name == NULL || handle == NULL) {
		pr_err("app_name is null or invalid handle\n");
		return -EINVAL;
	}
	/* allocate qseecom_compat_context */
	cxt = kzalloc(sizeof(struct qseecom_compat_context), GFP_KERNEL);
	if (!cxt)
		return -ENOMEM;
	/* get client env */
	ret = get_client_env_object(&cxt->client_env);
	if (ret) {
		pr_err("failed to get clientEnv when loading app %s, ret %d\n",
			app_name, ret);
		ret = -EINVAL;
		goto exit_free_cxt;
	}
	/* get apploader with CQSEEComCompatAppLoader_UID */
	ret = IClientEnv_open(cxt->client_env, CQSEEComCompatAppLoader_UID,
			&cxt->app_loader);
	if (ret) {
		pr_err("failed to get apploader when loading app %s, ret %d\n",
			app_name, ret);
		ret = -EINVAL;
		goto exit_release_clientenv;
	}
	/* load app */
	ret = load_app(cxt, app_name);
	if (ret) {
		pr_err("failed to load app %s, ret = %d\n",
			app_name, ret);
		ret = -EINVAL;
		goto exit_release_apploader;
	}
	/* allocate the shared req/resp buffer via the shm bridge */
	ret = qtee_shmbridge_allocate_shm(size, &cxt->shm);
	if (ret) {
		pr_err("qtee_shmbridge_allocate_shm failed, ret :%d\n", ret);
		ret = -EINVAL;
		goto exit_release_appcontroller;
	}
	cxt->sbuf = cxt->shm.vaddr;
	cxt->sbuf_len = size;
	*handle = (struct qseecom_handle *)cxt;
	return ret;

exit_release_appcontroller:
	Object_release(cxt->app_controller);
exit_release_apploader:
	Object_release(cxt->app_loader);
exit_release_clientenv:
	Object_release(cxt->client_env);
exit_free_cxt:
	kfree(cxt);
	return ret;
}
EXPORT_SYMBOL(qseecom_start_app);
/*
 * Tear down a handle created by qseecom_start_app(): free the shared
 * buffer, release all TZ objects, and NULL out the caller's handle.
 *
 * Fix: the original dereferenced *handle into cxt BEFORE the NULL check,
 * crashing on a NULL @handle; validate first, then dereference.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	struct qseecom_compat_context *cxt;

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is NULL\n");
		return -EINVAL;
	}
	cxt = (struct qseecom_compat_context *)(*handle);
	qtee_shmbridge_free_shm(&cxt->shm);
	Object_release(cxt->app_controller);
	Object_release(cxt->app_loader);
	Object_release(cxt->client_env);
	kfree(cxt);
	*handle = NULL;
	return 0;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
/*
 * Send a command buffer to the loaded TA and wait for its response.
 * The same buffers serve as both input and output halves of the compat
 * sendRequest call.
 *
 * Fix: the original printed pointers with %x, which is a format/argument
 * mismatch (and truncates on 64-bit); pointers are printed with %p.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
		uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	struct qseecom_compat_context *cxt =
			(struct qseecom_compat_context *)handle;
	size_t out_len = 0;

	pr_debug("%s, sbuf_len %u, rbuf_len %u\n",
		__func__, sbuf_len, rbuf_len);

	if (!handle || !send_buf || !resp_buf || !sbuf_len || !rbuf_len) {
		pr_err("One of params is invalid. %s, handle %p, send_buf %p,resp_buf %p,sbuf_len %u, rbuf_len %u\n",
			__func__, handle, send_buf, resp_buf, sbuf_len, rbuf_len);
		return -EINVAL;
	}
	/* NOTE(review): both out-length results share &out_len, so only the
	 * last one written survives — confirm this is intended. */
	return IQSEEComCompat_sendRequest(cxt->app_controller,
		send_buf, sbuf_len,
		resp_buf, rbuf_len,
		send_buf, sbuf_len, &out_len,
		resp_buf, rbuf_len, &out_len,
		NULL, 0, /* embedded offset array */
		(cxt->app_arch == ELFCLASS64),
		Object_NULL, Object_NULL,
		Object_NULL, Object_NULL);
}
EXPORT_SYMBOL(qseecom_send_command);
#endif

View File

@@ -0,0 +1,195 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __SMCINVOKE_OBJECT_H
#define __SMCINVOKE_OBJECT_H
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/qtee_shmbridge.h>
/*
* Method bits are not modified by transport layers. These describe the
* method (member function) being requested by the client.
*/
#define OBJECT_OP_METHOD_MASK (0x0000FFFFu)
#define OBJECT_OP_METHODID(op) ((op) & OBJECT_OP_METHOD_MASK)
#define OBJECT_OP_RELEASE (OBJECT_OP_METHOD_MASK - 0)
#define OBJECT_OP_RETAIN (OBJECT_OP_METHOD_MASK - 1)
#define OBJECT_OP_MAP_REGION 0
#define OBJECT_OP_YIELD 1
#define OBJECT_COUNTS_MAX_BI 0xF
#define OBJECT_COUNTS_MAX_BO 0xF
#define OBJECT_COUNTS_MAX_OI 0xF
#define OBJECT_COUNTS_MAX_OO 0xF
/* unpack counts */
#define OBJECT_COUNTS_NUM_BI(k) ((size_t) (((k) >> 0) & OBJECT_COUNTS_MAX_BI))
#define OBJECT_COUNTS_NUM_BO(k) ((size_t) (((k) >> 4) & OBJECT_COUNTS_MAX_BO))
#define OBJECT_COUNTS_NUM_OI(k) ((size_t) (((k) >> 8) & OBJECT_COUNTS_MAX_OI))
#define OBJECT_COUNTS_NUM_OO(k) ((size_t) (((k) >> 12) & OBJECT_COUNTS_MAX_OO))
#define OBJECT_COUNTS_NUM_buffers(k) \
(OBJECT_COUNTS_NUM_BI(k) + OBJECT_COUNTS_NUM_BO(k))
#define OBJECT_COUNTS_NUM_objects(k) \
(OBJECT_COUNTS_NUM_OI(k) + OBJECT_COUNTS_NUM_OO(k))
/* Indices into args[] */
#define OBJECT_COUNTS_INDEX_BI(k) 0
#define OBJECT_COUNTS_INDEX_BO(k) \
(OBJECT_COUNTS_INDEX_BI(k) + OBJECT_COUNTS_NUM_BI(k))
#define OBJECT_COUNTS_INDEX_OI(k) \
(OBJECT_COUNTS_INDEX_BO(k) + OBJECT_COUNTS_NUM_BO(k))
#define OBJECT_COUNTS_INDEX_OO(k) \
(OBJECT_COUNTS_INDEX_OI(k) + OBJECT_COUNTS_NUM_OI(k))
#define OBJECT_COUNTS_TOTAL(k) \
(OBJECT_COUNTS_INDEX_OO(k) + OBJECT_COUNTS_NUM_OO(k))
#define OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
((in_objs) << 8) | ((out_objs) << 12)))
#define OBJECT_COUNTS_INDEX_buffers(k) OBJECT_COUNTS_INDEX_BI(k)
/* Object_invoke return codes */
#define OBJECT_isOK(err) ((err) == 0)
#define OBJECT_isERROR(err) ((err) != 0)
/* Generic error codes */
#define OBJECT_OK 0 /* non-specific success code */
#define OBJECT_ERROR 1 /* non-specific error */
#define OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */
#define OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */
#define OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */
#define OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */
/* Transport layer error codes */
#define OBJECT_ERROR_DEFUNCT -90 /* object no longer exists */
#define OBJECT_ERROR_ABORT -91 /* calling thread must exit */
#define OBJECT_ERROR_BADOBJ -92 /* invalid object context */
#define OBJECT_ERROR_NOSLOTS -93 /* caller's object table full */
#define OBJECT_ERROR_MAXARGS -94 /* too many args */
#define OBJECT_ERROR_MAXDATA -95 /* buffers too large */
#define OBJECT_ERROR_UNAVAIL -96 /* the request could not be processed */
#define OBJECT_ERROR_KMEM -97 /* kernel out of memory */
#define OBJECT_ERROR_REMOTE -98 /* local method sent to remote object */
#define OBJECT_ERROR_BUSY -99 /* Object is busy */
#define Object_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. */
/*
 * Iterate @ndxvar over the indices of one argument section (BI, BO, OI,
 * OO, or buffers) of a packed @counts word.
 */
#define FOR_ARGS(ndxvar, counts, section) \
	for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
		ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
		+ OBJECT_COUNTS_NUM_##section(counts)); \
		++ndxvar)
/* ObjectOp: method-id extraction and locality test for an op word. */
#define ObjectOp_METHOD_MASK ((uint32_t) 0x0000FFFFu)
#define ObjectOp_methodID(op) ((op) & ObjectOp_METHOD_MASK)
#define ObjectOp_LOCAL ((uint32_t) 0x00008000U)
#define ObjectOp_isLocal(op) (((op) & ObjectOp_LOCAL) != 0)
/* Reserved method IDs for reference counting. */
#define Object_OP_release (ObjectOp_METHOD_MASK - 0)
#define Object_OP_retain (ObjectOp_METHOD_MASK - 1)
/* Object: pack per-section argument counts into one 16-bit word. */
#define ObjectCounts_pack(nBuffersIn, nBuffersOut, nObjectsIn, nObjectsOut) \
	((uint32_t) ((nBuffersIn) | \
	((nBuffersOut) << 4) | \
	((nObjectsIn) << 8) | \
	((nObjectsOut) << 12)))
union ObjectArg;

/* Invoke entry point: dispatches @op with @args on object context @h. */
typedef int32_t (*ObjectInvoke)(void *h,
		uint32_t op,
		union ObjectArg *args,
		uint32_t counts);

/* An object reference: a transport function plus its private context. */
struct Object {
	ObjectInvoke invoke;
	void *context;
};

/* Mutable (in/out) buffer argument. */
struct ObjectBuf {
	void *ptr;
	size_t size;
};

/* Read-only (input) buffer argument. */
struct ObjectBufIn {
	const void *ptr;
	size_t size;
};

/* One invoke argument: in/out buffer, input buffer, or object. */
union ObjectArg {
	struct ObjectBuf b;
	struct ObjectBufIn bi;
	struct Object o;
};
/* Call @op on object @o with argument array @args and packed counts @k. */
static inline int32_t Object_invoke(struct Object o, uint32_t op,
		union ObjectArg *args, uint32_t k)
{
	return o.invoke(o.context, op, args, k);
}

#define Object_NULL ((struct Object){NULL, NULL})

/* Annotation markers: documentation only — they expand to nothing. */
#define OBJECT_NOT_RETAINED

#define OBJECT_CONSUMED

/* Drop one reference on @o. */
static inline int32_t Object_release(OBJECT_CONSUMED struct Object o)
{
	return Object_invoke((o), Object_OP_release, 0, 0);
}

/* Take one reference on @o. */
static inline int32_t Object_retain(struct Object o)
{
	return Object_invoke((o), Object_OP_retain, 0, 0);
}

/* An object is NULL when it has no invoke function. */
#define Object_isNull(o) ((o).invoke == NULL)

/* Release @o only if it is non-NULL (evaluates @o exactly once). */
#define Object_RELEASE_IF(o) \
	do { \
		struct Object o_ = (o); \
		if (!Object_isNull(o_)) \
			(void) Object_release(o_); \
	} while (0)
/*
 * Replace *loc with objNew, adjusting reference counts.
 * Retain the new object BEFORE releasing the old one: if *loc and objNew
 * refer to the same object holding its last reference, releasing first
 * would destroy it and then retain a dangling object.
 */
static inline void Object_replace(struct Object *loc, struct Object objNew)
{
	if (!Object_isNull(objNew))
		Object_retain(objNew);
	if (!Object_isNull(*loc))
		Object_release(*loc);
	*loc = objNew;
}
/* Release *loc's reference (if any) and set it to the NULL object. */
#define Object_ASSIGN_NULL(loc)  Object_replace(&(loc), Object_NULL)

/* Hooks implemented by the smcinvoke driver for kernel clients. */
int smcinvoke_release_from_kernel_client(int fd);

int get_root_fd(int *root_fd);
int process_invoke_request_from_kernel_client(
		int fd, struct smcinvoke_cmd_req *req);

char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm);
#endif /* __SMCINVOKE_OBJECT_H */

498
smcinvoke/trace_smcinvoke.h Normal file
View File

@@ -0,0 +1,498 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smcinvoke

#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_smcinvoke

/*
 * Standard tracepoint multi-read guard.
 * Fix: the guard tested _TRACE_SMCINVOKE but defined _TRACE_SMCINVOKE_H,
 * so the condition was always true; the two names must match.
 */
#if !defined(_TRACE_SMCINVOKE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMCINVOKE_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include "smcinvoke.h"
/* Tracepoint: records the (server id, object id) pair being put. */
TRACE_EVENT(put_pending_cbobj_locked,
	TP_PROTO(uint16_t srvr_id, uint16_t obj_id),
	TP_ARGS(srvr_id, obj_id),
	TP_STRUCT__entry(
		__field(uint16_t, srvr_id)
		__field(uint16_t, obj_id)
	),
	TP_fast_assign(
		__entry->srvr_id = srvr_id;
		__entry->obj_id = obj_id;
	),
	TP_printk("srvr_id=0x%x obj_id=0x%x",
		__entry->srvr_id, __entry->obj_id)
);
/* Tracepoint: records a memory object release (TZ handle + buffer length). */
TRACE_EVENT(release_mem_obj_locked,
	TP_PROTO(uint32_t tzhandle, size_t buf_len),
	TP_ARGS(tzhandle, buf_len),
	TP_STRUCT__entry(
		__field(uint32_t, tzhandle)
		__field(size_t, buf_len)
	),
	TP_fast_assign(
		__entry->tzhandle = tzhandle;
		__entry->buf_len = buf_len;
	),
	TP_printk("tzhandle=0x%08x, buf_len=%zu",
		__entry->tzhandle, __entry->buf_len)
);
/* Tracepoint: records an invoke command with its response type and results. */
TRACE_EVENT(invoke_cmd_handler,
	TP_PROTO(int cmd, uint64_t response_type, int32_t result, int ret),
	TP_ARGS(cmd, response_type, result, ret),
	TP_STRUCT__entry(
		__field(int, cmd)
		__field(uint64_t, response_type)
		__field(int32_t, result)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->response_type = response_type;
		__entry->result = result;
		__entry->ret = ret;
		__entry->cmd = cmd;
	),
	/* response_type is a u64: print with %llu (the original used %ld) */
	TP_printk("cmd=0x%x (%d), response_type=%llu, result=0x%x (%d), ret=%d",
		__entry->cmd, __entry->cmd, __entry->response_type,
		__entry->result, __entry->result, __entry->ret)
);
/* Tracepoint: records a TZ callback request (handle, op, packed counts). */
TRACE_EVENT(process_tzcb_req_handle,
	TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
	TP_ARGS(tzhandle, op, counts),
	TP_STRUCT__entry(
		__field(uint32_t, tzhandle)
		__field(uint32_t, op)
		__field(uint32_t, counts)
	),
	TP_fast_assign(
		__entry->tzhandle = tzhandle;
		__entry->op = op;
		__entry->counts = counts;
	),
	TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
		__entry->tzhandle, __entry->op, __entry->counts)
);
/* Tracepoint: records each wait/retry while a TZ callback request is pending. */
TRACE_EVENT(process_tzcb_req_wait,
	TP_PROTO(uint32_t tzhandle, int cbobj_retries, uint32_t txn_id, pid_t pid, pid_t tgid,
		uint16_t server_state, uint16_t server_id, unsigned int cb_reqs_inflight),
	TP_ARGS(tzhandle, cbobj_retries, txn_id, pid, tgid, server_state, server_id,
		cb_reqs_inflight),
	TP_STRUCT__entry(
		__field(uint32_t, tzhandle)
		__field(int, cbobj_retries)
		__field(uint32_t, txn_id)
		__field(pid_t, pid)
		__field(pid_t, tgid)
		__field(uint16_t, server_state)
		__field(uint16_t, server_id)
		__field(unsigned int, cb_reqs_inflight)
	),
	TP_fast_assign(
		__entry->tzhandle = tzhandle;
		__entry->cbobj_retries = cbobj_retries;
		__entry->txn_id = txn_id;
		__entry->pid = pid;
		__entry->tgid = tgid;
		__entry->server_state = server_state;
		__entry->server_id = server_id;
		__entry->cb_reqs_inflight = cb_reqs_inflight;
	),
	TP_printk("tzhandle=0x%08x, retries=%d, txn_id=%d, pid %x,tid %x, srvr state=%d, server_id=0x%x, cb_reqs_inflight=%d",
		__entry->tzhandle, __entry->cbobj_retries, __entry->txn_id,
		__entry->pid, __entry->tgid, __entry->server_state,
		__entry->server_id, __entry->cb_reqs_inflight)
);
/* Tracepoint: records the final result of a TZ callback request. */
TRACE_EVENT(process_tzcb_req_result,
	TP_PROTO(int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts,
		unsigned int cb_reqs_inflight),
	TP_ARGS(result, tzhandle, op, counts, cb_reqs_inflight),
	TP_STRUCT__entry(
		__field(int32_t, result)
		__field(uint32_t, tzhandle)
		__field(uint32_t, op)
		__field(uint32_t, counts)
		__field(unsigned int, cb_reqs_inflight)
	),
	TP_fast_assign(
		__entry->result = result;
		__entry->tzhandle = tzhandle;
		__entry->op = op;
		__entry->counts = counts;
		__entry->cb_reqs_inflight = cb_reqs_inflight;
	),
	TP_printk("result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x, cb_reqs_inflight=%d",
		__entry->result, __entry->tzhandle, __entry->op, __entry->counts,
		__entry->cb_reqs_inflight)
);
/*
 * Fires per output object ("OO[i]") while marshaling the results of an
 * invoke request: records the object's TZ handle, the owning server id,
 * and the fd handed back for it.
 */
TRACE_EVENT(marshal_out_invoke_req,
	TP_PROTO(int i, uint32_t tzhandle, uint16_t server, uint32_t fd),
	TP_ARGS(i, tzhandle, server, fd),
	TP_STRUCT__entry(
		__field(int, i)
		__field(uint32_t, tzhandle)
		__field(uint16_t, server)
		__field(uint32_t, fd)
	),
	TP_fast_assign(
		__entry->i = i;
		__entry->tzhandle = tzhandle;
		__entry->server = server;
		__entry->fd = fd;
	),
	TP_printk("OO[%d]: tzhandle=0x%x server=0x%x fd=0x%x",
		__entry->i, __entry->tzhandle, __entry->server, __entry->fd)
);
/*
 * Fires around sending an SCM invoke message: records the response type
 * reported by TZ and the result code.
 */
TRACE_EVENT(prepare_send_scm_msg,
	TP_PROTO(uint64_t response_type, int32_t result),
	TP_ARGS(response_type, result),
	TP_STRUCT__entry(
		__field(uint64_t, response_type)
		__field(int32_t, result)
	),
	TP_fast_assign(
		__entry->response_type = response_type;
		__entry->result = result;
	),
	/*
	 * response_type is a u64: it needs the "ll" length modifier.
	 * The previous %lx/%ld consumed only 32 bits on 32-bit kernels.
	 */
	TP_printk("response_type=0x%llx (%llu), result=0x%x (%d)",
		__entry->response_type, __entry->response_type,
		__entry->result, __entry->result)
);
/*
 * Fires per input object ("OI[i]") while marshaling the arguments of an
 * invoke request: records the fd supplied by the caller, the callback
 * server fd (if any), and the TZ handle resolved for the object.
 */
TRACE_EVENT(marshal_in_invoke_req,
	TP_PROTO(int i, int64_t fd, int32_t cb_server_fd, uint32_t tzhandle),
	TP_ARGS(i, fd, cb_server_fd, tzhandle),
	TP_STRUCT__entry(
		__field(int, i)
		__field(int64_t, fd)
		__field(int32_t, cb_server_fd)
		__field(uint32_t, tzhandle)
	),
	TP_fast_assign(
		__entry->i = i;
		__entry->fd = fd;
		__entry->cb_server_fd = cb_server_fd;
		__entry->tzhandle = tzhandle;
	),
	/* fd is 64-bit: 0x%x would consume only 32 bits of the vararg. */
	TP_printk("OI[%d]: fd=0x%llx cb_server_fd=0x%x tzhandle=0x%x",
		__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
);
/*
 * Fires while marshaling an incoming TZ callback request: records the
 * TZ handle, the callback server id, the local callback object id the
 * handle maps to, and the op/counts of the request.
 */
TRACE_EVENT(marshal_in_tzcb_req_handle,
	TP_PROTO(uint32_t tzhandle, int srvr_id, int32_t cbobj_id, uint32_t op, uint32_t counts),
	TP_ARGS(tzhandle, srvr_id, cbobj_id, op, counts),
	TP_STRUCT__entry(
		__field(uint32_t, tzhandle)
		__field(int, srvr_id)
		__field(int32_t, cbobj_id)
		__field(uint32_t, op)
		__field(uint32_t, counts)
	),
	TP_fast_assign(
		__entry->tzhandle = tzhandle;
		__entry->srvr_id = srvr_id;
		__entry->cbobj_id = cbobj_id;
		__entry->op = op;
		__entry->counts = counts;
	),
	TP_printk("tzhandle=0x%x srvr_id=0x%x cbobj_id=0x%08x op=0x%02x counts=0x%04x",
		__entry->tzhandle, __entry->srvr_id, __entry->cbobj_id,
		__entry->op, __entry->counts)
);
/*
 * Fires per input object ("OI[i]") while marshaling a TZ callback
 * request: records the TZ handle, the callback server id, and the fd
 * produced for the object.
 */
TRACE_EVENT(marshal_in_tzcb_req_fd,
	TP_PROTO(int i, uint32_t tzhandle, int srvr_id, int32_t fd),
	TP_ARGS(i, tzhandle, srvr_id, fd),
	TP_STRUCT__entry(
		__field(int, i)
		__field(uint32_t, tzhandle)
		__field(int, srvr_id)
		__field(int32_t, fd)
	),
	TP_fast_assign(
		__entry->i = i;
		__entry->tzhandle = tzhandle;
		__entry->srvr_id = srvr_id;
		__entry->fd = fd;
	),
	TP_printk("OI[%d]: tzhandle=0x%x srvr_id=0x%x fd=0x%x",
		__entry->i, __entry->tzhandle, __entry->srvr_id, __entry->fd)
);
/*
 * Fires per output object ("OO[i]") while marshaling the results of a
 * TZ callback request: records the fd, the callback server fd, and the
 * TZ handle of the object.
 */
TRACE_EVENT(marshal_out_tzcb_req,
	TP_PROTO(uint32_t i, int32_t fd, int32_t cb_server_fd, uint32_t tzhandle),
	TP_ARGS(i, fd, cb_server_fd, tzhandle),
	TP_STRUCT__entry(
		/* match the uint32_t index in TP_PROTO (was a signed int) */
		__field(uint32_t, i)
		__field(int32_t, fd)
		__field(int32_t, cb_server_fd)
		__field(uint32_t, tzhandle)
	),
	TP_fast_assign(
		__entry->i = i;
		__entry->fd = fd;
		__entry->cb_server_fd = cb_server_fd;
		__entry->tzhandle = tzhandle;
	),
	TP_printk("OO[%u]: fd=0x%x cb_server_fd=0x%x tzhandle=0x%x",
		__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
);
/*
 * Fires when an invoke request is processed: records the target TZ
 * handle, the operation id, and the packed argument counts word.
 */
TRACE_EVENT(process_invoke_req_tzhandle,
	TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
	TP_ARGS(tzhandle, op, counts),
	TP_STRUCT__entry(
		__field(uint32_t, tzhandle)
		__field(uint32_t, op)
		__field(uint32_t, counts)
	),
	TP_fast_assign(
		__entry->tzhandle = tzhandle;
		__entry->op = op;
		__entry->counts = counts;
	),
	TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
		__entry->tzhandle, __entry->op, __entry->counts)
);
/*
 * Fires when an invoke request completes: records the local return
 * value, the TZ result code, and the handle/op/counts that identified
 * the request.
 */
TRACE_EVENT(process_invoke_req_result,
	TP_PROTO(int ret, int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts),
	TP_ARGS(ret, result, tzhandle, op, counts),
	TP_STRUCT__entry(
		__field(int, ret)
		__field(int32_t, result)
		__field(uint32_t, tzhandle)
		__field(uint32_t, op)
		__field(uint32_t, counts)
	),
	TP_fast_assign(
		__entry->ret = ret;
		__entry->result = result;
		__entry->tzhandle = tzhandle;
		__entry->op = op;
		__entry->counts = counts;
	),
	TP_printk("ret=%d result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x",
		__entry->ret, __entry->result, __entry->tzhandle,
		__entry->op, __entry->counts)
);
/*
 * Fires to capture a free-form log string coming from userspace via the
 * log ioctl, tagged with the caller's context type and TZ handle.  The
 * string is copied into the ring buffer with __string/__assign_str.
 */
TRACE_EVENT(process_log_info,
	TP_PROTO(char *buf, uint32_t context_type, uint32_t tzhandle),
	TP_ARGS(buf, context_type, tzhandle),
	TP_STRUCT__entry(
		__string(str, buf)
		__field(uint32_t, context_type)
		__field(uint32_t, tzhandle)
	),
	TP_fast_assign(
		__assign_str(str, buf);
		__entry->context_type = context_type;
		__entry->tzhandle = tzhandle;
	),
	TP_printk("%s context_type=%d tzhandle=0x%08x",
		__get_str(str),
		__entry->context_type, __entry->tzhandle)
);
/*
 * Conditional tracepoint for the smcinvoke ioctl path.  TP_CONDITION(ret)
 * means the event is only recorded when ret is non-zero, i.e. on failing
 * ioctls; the cmd number is decoded to its symbolic SMCINVOKE_IOCTL_*
 * name in the trace output.
 */
TRACE_EVENT_CONDITION(smcinvoke_ioctl,
	TP_PROTO(unsigned int cmd, long ret),
	TP_ARGS(cmd, ret),
	TP_CONDITION(ret),
	TP_STRUCT__entry(
		__field(unsigned int, cmd)
		__field(long, ret)
	),
	TP_fast_assign(
		__entry->cmd = cmd;
		__entry->ret = ret;
	),
	TP_printk("cmd=%s ret=%ld",
		__print_symbolic(__entry->cmd,
			{SMCINVOKE_IOCTL_INVOKE_REQ, "SMCINVOKE_IOCTL_INVOKE_REQ"},
			{SMCINVOKE_IOCTL_ACCEPT_REQ, "SMCINVOKE_IOCTL_ACCEPT_REQ"},
			{SMCINVOKE_IOCTL_SERVER_REQ, "SMCINVOKE_IOCTL_SERVER_REQ"},
			{SMCINVOKE_IOCTL_ACK_LOCAL_OBJ, "SMCINVOKE_IOCTL_ACK_LOCAL_OBJ"},
			{SMCINVOKE_IOCTL_LOG, "SMCINVOKE_IOCTL_LOG"}
		), __entry->ret)
);
/*
 * Fires when a shared-memory bridge is created: records the returned
 * shmbridge handle and the memory region id it was created for.
 */
TRACE_EVENT(smcinvoke_create_bridge,
	TP_PROTO(uint64_t shmbridge_handle, uint16_t mem_region_id),
	TP_ARGS(shmbridge_handle, mem_region_id),
	TP_STRUCT__entry(
		__field(uint64_t, shmbridge_handle)
		__field(uint16_t, mem_region_id)
	),
	TP_fast_assign(
		__entry->shmbridge_handle = shmbridge_handle;
		__entry->mem_region_id = mem_region_id;
	),
	TP_printk("created shm bridge handle %llu for mem_region_id %u",
		__entry->shmbridge_handle, __entry->mem_region_id)
);
/*
 * Generic status tracepoint: records a function name and a free-form
 * status string supplied by the caller.
 */
TRACE_EVENT(status,
	TP_PROTO(const char *func, const char *status),
	TP_ARGS(func, status),
	TP_STRUCT__entry(
		__string(str, func)
		__string(str2, status)
	),
	TP_fast_assign(
		__assign_str(str, func);
		__assign_str(str2, status);
	),
	TP_printk("%s status=%s", __get_str(str), __get_str(str2))
);
/*
 * Fires in the accept-request path when a response is available: records
 * the pid/tgid of the task involved.
 */
TRACE_EVENT(process_accept_req_has_response,
	TP_PROTO(pid_t pid, pid_t tgid),
	TP_ARGS(pid, tgid),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(pid_t, tgid)
	),
	TP_fast_assign(
		__entry->pid = pid;
		__entry->tgid = tgid;
	),
	TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
);
/*
 * Fires when the accept-request path returns: records the pid/tgid of
 * the task and the return code.
 */
TRACE_EVENT(process_accept_req_ret,
	TP_PROTO(pid_t pid, pid_t tgid, int ret),
	TP_ARGS(pid, tgid, ret),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(pid_t, tgid)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->pid = pid;
		__entry->tgid = tgid;
		__entry->ret = ret;
	),
	TP_printk("pid=0x%x tgid=0x%x ret=%d", __entry->pid, __entry->tgid, __entry->ret)
);
/*
 * Fires when an accept request has been placed: records the pid/tgid of
 * the task involved.
 */
TRACE_EVENT(process_accept_req_placed,
	TP_PROTO(pid_t pid, pid_t tgid),
	TP_ARGS(pid, tgid),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(pid_t, tgid)
	),
	TP_fast_assign(
		__entry->pid = pid;
		__entry->tgid = tgid;
	),
	TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
);
/*
 * Fires for an invoke request issued by an in-kernel client: records the
 * fd, the struct file pointer, and the file's reference count snapshot.
 */
TRACE_EVENT(process_invoke_request_from_kernel_client,
	TP_PROTO(int fd, struct file *filp, int f_count),
	TP_ARGS(fd, filp, f_count),
	TP_STRUCT__entry(
		__field(int, fd)
		__field(struct file*, filp)
		__field(int, f_count)
	),
	TP_fast_assign(
		__entry->fd = fd;
		__entry->filp = filp;
		__entry->f_count = f_count;
	),
	TP_printk("fd=%d, filp=%p, f_count=%d",
		__entry->fd,
		__entry->filp,
		__entry->f_count)
);
/*
 * Fires when a file backing an smcinvoke object is released: records the
 * files_struct and file pointers, the file's reference count snapshot,
 * and the context type of the object.
 */
TRACE_EVENT(smcinvoke_release_filp,
	TP_PROTO(struct files_struct *files, struct file *filp,
		int f_count, uint32_t context_type),
	TP_ARGS(files, filp, f_count, context_type),
	TP_STRUCT__entry(
		__field(struct files_struct*, files)
		__field(struct file*, filp)
		__field(int, f_count)
		__field(uint32_t, context_type)
	),
	TP_fast_assign(
		__entry->files = files;
		__entry->filp = filp;
		__entry->f_count = f_count;
		__entry->context_type = context_type;
	),
	/*
	 * f_count is signed, context_type unsigned; use matching specifiers
	 * (was f_count=%u / cxt_type=%d), consistent with smcinvoke_release.
	 */
	TP_printk("files=%p, filp=%p, f_count=%d, cxt_type=%u",
		__entry->files,
		__entry->filp,
		__entry->f_count,
		__entry->context_type)
);
/*
 * Fires when an in-kernel client releases an smcinvoke file: records the
 * files_struct and file pointers and the file's reference count snapshot.
 */
TRACE_EVENT(smcinvoke_release_from_kernel_client,
	TP_PROTO(struct files_struct *files, struct file *filp, int f_count),
	TP_ARGS(files, filp, f_count),
	TP_STRUCT__entry(
		__field(struct files_struct*, files)
		__field(struct file*, filp)
		__field(int, f_count)
	),
	TP_fast_assign(
		__entry->files = files;
		__entry->filp = filp;
		__entry->f_count = f_count;
	),
	/* f_count is a signed int; %d (was %u), matching smcinvoke_release. */
	TP_printk("files=%p, filp=%p, f_count=%d",
		__entry->files,
		__entry->filp,
		__entry->f_count)
);
/*
 * Fires on release of an smcinvoke file: records the files_struct and
 * file pointers, the file's reference count snapshot, and the file's
 * private_data pointer.
 */
TRACE_EVENT(smcinvoke_release,
	TP_PROTO(struct files_struct *files, struct file *filp,
		int f_count, void *private_data),
	TP_ARGS(files, filp, f_count, private_data),
	TP_STRUCT__entry(
		__field(struct files_struct*, files)
		__field(struct file*, filp)
		__field(int, f_count)
		__field(void*, private_data)
	),
	TP_fast_assign(
		__entry->files = files;
		__entry->filp = filp;
		__entry->f_count = f_count;
		__entry->private_data = private_data;
	),
	TP_printk("files=%p, filp=%p, f_count=%d, private_data=%p",
		__entry->files,
		__entry->filp,
		__entry->f_count,
		__entry->private_data)
);
#endif /* _TRACE_SMCINVOKE_H */
/*
 * Standard trace-header boilerplate: redirect define_trace.h to this
 * out-of-tree header (path is relative to include/trace in the kernel
 * source) so the tracepoint bodies get generated from the definitions
 * above.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_smcinvoke
/* This part must be outside protection */
#include <trace/define_trace.h>

96
ssg_kernel_headers.py Normal file
View File

@@ -0,0 +1,96 @@
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import filecmp
import os
import re
import subprocess
import sys
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
    """Sanitize and install a single kernel header.

    Strips ``prefix`` from the header path ``h`` and runs the
    ``headers_install`` shell script to write the processed header under
    ``gen_dir``. The script locates the unifdef tool through the
    LOC_UNIFDEF environment variable.

    Returns True on success, False if ``h`` does not start with
    ``prefix`` or the install command exits non-zero.
    """
    if not h.startswith(prefix):
        print('error: expected prefix [%s] on header [%s]' % (prefix, h))
        return False
    out_h = os.path.join(gen_dir, h[len(prefix):])
    # NOTE: a previous os.path.split(out_h) result was computed but never
    # used; removed as dead code.
    # Copy the environment so the caller's env is not mutated.
    env = os.environ.copy()
    env["LOC_UNIFDEF"] = unifdef
    cmd = ["sh", headers_install, h, out_h]
    if verbose:
        print('run_headers_install: cmd is %s' % cmd)
    result = subprocess.call(cmd, env=env)
    if result != 0:
        print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
        return False
    return True
def gen_audio_headers(verbose, gen_dir, headers_install, unifdef, audio_include_uapi):
    """Install every header in ``audio_include_uapi`` into ``gen_dir``.

    For each header, the prefix up to and including ``include/uapi/`` is
    recomputed (with an ``audio`` component appended) and stripped by
    run_headers_install. Returns the number of headers that failed.
    """
    failures = 0
    for header in audio_include_uapi:
        uapi_prefix = os.path.join(header.split('/include/uapi/')[0],
                                   'include',
                                   'uapi',
                                   'audio') + os.sep
        ok = run_headers_install(verbose, gen_dir, headers_install,
                                 unifdef, uapi_prefix, header)
        if not ok:
            failures += 1
    return failures
def main():
    """Parse command line arguments and perform top level control."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Arguments that apply to every invocation of this script.
    arg_parser.add_argument(
        '--verbose', action='store_true',
        help='Print output that describes the workings of this script.')
    arg_parser.add_argument(
        '--header_arch', required=True,
        help='The arch for which to generate headers.')
    arg_parser.add_argument(
        '--gen_dir', required=True,
        help='Where to place the generated files.')
    arg_parser.add_argument(
        '--audio_include_uapi', required=True, nargs='*',
        help='The list of techpack/*/include/uapi header files.')
    arg_parser.add_argument(
        '--headers_install', required=True,
        help='The headers_install tool to process input headers.')
    arg_parser.add_argument(
        '--unifdef',
        required=True,
        help='The unifdef tool used by headers_install.')
    opts = arg_parser.parse_args()

    if opts.verbose:
        # Echo every resolved option for debugging.
        for label, value in (
                ('header_arch', opts.header_arch),
                ('gen_dir', opts.gen_dir),
                ('audio_include_uapi', opts.audio_include_uapi),
                ('headers_install', opts.headers_install),
                ('unifdef', opts.unifdef)):
            print('%s [%s]' % (label, value))

    return gen_audio_headers(opts.verbose, opts.gen_dir,
                             opts.headers_install, opts.unifdef,
                             opts.audio_include_uapi)
# Script entry point: propagate the failure count from main() as the
# process exit status (0 means all headers installed successfully).
if __name__ == '__main__':
    sys.exit(main())

1689
tz_log/tz_log.c Normal file

File diff suppressed because it is too large Load Diff