
If the fence-command setup thread is pre-empted, the fence command can be processed by HW even before the setup thread returns. In that case, the setup thread may use the wrong fence number, causing it to resend an HFI packet that has already been processed. The consequence could be an SMMU fault. Change-Id: I8be9808e993a27a1c7664e68a5382d145a8bc3cc Signed-off-by: George Shen <quic_sqiao@quicinc.com>
1725 line
40 KiB
C
1725 line
40 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
|
|
*/
|
|
|
|
#include <linux/err.h>

#include "msm_cvp.h"
#include "cvp_hfi.h"
#include "cvp_core_hfi.h"
#include "msm_cvp_buf.h"
#include "cvp_comm_def.h"
#include "cvp_power.h"
#include "cvp_hfi_api.h"
|
|
static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
|
|
struct eva_kmd_hfi_packet *in_pkt,
|
|
unsigned int in_offset,
|
|
unsigned int in_buf_num);
|
|
|
|
/**
 * msm_cvp_get_session_info() - report the 32-bit hashed session id.
 * @inst: session instance to query.
 * @session: output; receives hash32_ptr() of the HFI session handle.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ECONNRESET if the
 * instance is no longer valid within its core.
 */
int msm_cvp_get_session_info(struct msm_cvp_inst *inst, u32 *session)
{
	struct msm_cvp_inst *sess;

	if (!inst || !inst->core || !session) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Take a reference so the instance cannot vanish under us. */
	sess = cvp_get_inst_validate(inst->core, inst);
	if (!sess)
		return -ECONNRESET;

	*session = hash32_ptr(inst->session);
	dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, *session);
	cvp_put_inst(sess);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * cvp_msg_pending() - wait_event predicate: is there a deliverable message?
 * @sq: session message queue to inspect.
 * @msg: output; dequeued message on success, NULL otherwise.
 * @ktid: optional kernel transaction id; when non-NULL only a message whose
 *        pkt.client_data.kdata matches is dequeued, otherwise the head of
 *        the queue is taken.
 *
 * Returns true to wake the waiter: either a message was dequeued (*msg set)
 * or the queue is being torn down (*msg == NULL).  Returns false to keep
 * waiting.  Caller owns the returned message and must free it.
 */
static bool cvp_msg_pending(struct cvp_session_queue *sq,
		struct cvp_session_msg **msg, u64 *ktid)
{
	struct cvp_session_msg *mptr = NULL, *dummy;
	bool result = false;

	if (!sq)
		return false;
	spin_lock(&sq->lock);
	if (sq->state == QUEUE_INIT || sq->state == QUEUE_INVALID) {
		/* The session is being deleted */
		spin_unlock(&sq->lock);
		*msg = NULL;
		/* Wake the waiter with no message so it can bail out. */
		return true;
	}
	/* result doubles as "nothing found" flag from here on. */
	result = list_empty(&sq->msgs);
	if (!result) {
		mptr = list_first_entry(&sq->msgs,
				struct cvp_session_msg,
				node);
		if (!ktid) {
			/* No filter: dequeue the head message. */
			if (mptr) {
				list_del_init(&mptr->node);
				sq->msg_count--;
			}
		} else {
			/* Filtered: scan for the matching transaction id. */
			result = true;
			list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
				if (*ktid == mptr->pkt.client_data.kdata) {
					list_del_init(&mptr->node);
					sq->msg_count--;
					result = false;
					break;
				}
			}
			if (result)
				/* Loop cursor is stale; don't leak it out. */
				mptr = NULL;
		}
	}
	spin_unlock(&sq->lock);
	*msg = mptr;
	return !result;
}
|
|
|
|
/*
 * cvp_wait_process_message() - wait for an HFI response on @sq and copy it out.
 * @inst: session instance (used for diagnostics and frame unmap).
 * @sq: session queue to wait on.
 * @ktid: optional transaction id filter, passed through to cvp_msg_pending().
 * @timeout: wait limit in jiffies.
 * @out: optional destination; when NULL the message is consumed and dropped.
 *
 * Return: 0 on success, -ETIMEDOUT on timeout, -ECONNRESET when the session
 * is closing/invalid.  A NULL message with an active queue state triggers
 * a session kill (fatal inconsistency) and still returns 0.
 */
static int cvp_wait_process_message(struct msm_cvp_inst *inst,
		struct cvp_session_queue *sq, u64 *ktid,
		unsigned long timeout,
		struct eva_kmd_hfi_packet *out)
{
	struct cvp_session_msg *msg = NULL;
	struct cvp_hfi_msg_session_hdr *hdr;
	int rc = 0;

	if (wait_event_timeout(sq->wq,
		cvp_msg_pending(sq, &msg, ktid), timeout) == 0) {
		dprintk(CVP_WARN, "session queue wait timeout\n");
		if (inst && inst->core && inst->core->device) {
			/* Dump HFI queues to aid timeout debugging. */
			print_hfi_queue_info(inst->core->device);
		}
		rc = -ETIMEDOUT;
		goto exit;
	}

	if (msg == NULL) {
		/* Woken without a message: queue torn down or inconsistent. */
		dprintk(CVP_WARN, "%s: queue state %d, msg cnt %d\n", __func__,
			sq->state, sq->msg_count);

		if (inst->state >= MSM_CVP_CLOSE_DONE ||
				(sq->state != QUEUE_ACTIVE &&
				sq->state != QUEUE_START)) {
			rc = -ECONNRESET;
			goto exit;
		}

		/* Active queue but no message: unrecoverable, kill session. */
		msm_cvp_comm_kill_session(inst);
		goto exit;
	}

	if (!out) {
		/* Caller only wanted the message consumed. */
		cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);
		goto exit;
	}

	hdr = (struct cvp_hfi_msg_session_hdr *)&msg->pkt;
	memcpy(out, &msg->pkt, get_msg_size(hdr));
	/*
	 * kdata values >= ARRAY_SIZE(cvp_hfi_defs) are frame transaction
	 * ids (not packet-table indices), so the frame buffers mapped for
	 * that transaction can be released now.
	 */
	if (hdr->client_data.kdata >= ARRAY_SIZE(cvp_hfi_defs))
		msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
	cvp_kmem_cache_free(&cvp_driver->msg_cache, msg);

exit:
	return rc;
}
|
|
|
|
/*
 * msm_cvp_session_receive_hfi() - block until the next HFI response for this
 * session arrives and copy it to @out_pkt.
 * @inst: session instance.
 * @out_pkt: output buffer for the received packet.
 *
 * Waits up to CVP_MAX_WAIT_TIME ms on the non-fence session queue, then
 * re-evaluates the clock vote if the response is DCVS-relevant.
 *
 * NOTE(review): @out_pkt is fed to check_clock_required() even when
 * cvp_wait_process_message() failed (e.g. -ETIMEDOUT), in which case the
 * buffer was never filled in -- confirm check_clock_required() tolerates
 * unpopulated contents.
 *
 * Return: 0 on success; -EINVAL, -ECONNRESET or -ETIMEDOUT on failure.
 */
static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
			struct eva_kmd_hfi_packet *out_pkt)
{
	unsigned long wait_time;
	struct cvp_session_queue *sq;
	struct msm_cvp_inst *s;
	int rc = 0;
	bool clock_check = false;

	if (!inst) {
		dprintk(CVP_ERR, "%s invalid session\n", __func__);
		return -EINVAL;
	}

	/* Hold a reference for the duration of the sleep. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
	sq = &inst->session_queue;

	/* ktid == NULL: take the first queued message, whatever its id. */
	rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);

	clock_check = check_clock_required(inst, out_pkt);
	if (clock_check)
		cvp_check_clock(inst,
			(struct cvp_hfi_msg_session_hdr_ext *)out_pkt);

	cvp_put_inst(inst);
	return rc;
}
|
|
|
|
/*
 * msm_cvp_session_process_hfi() - validate and submit a user HFI command.
 * @inst: session instance.
 * @in_pkt: user-provided HFI packet.
 * @in_offset: offset (in words) of the buffer descriptors inside the packet.
 * @in_buf_num: number of buffer descriptors in the packet.
 *
 * Validates the packet type against cvp_hfi_defs[], enforces queue-state
 * rules (frame packets only while QUEUE_START), validates buffer params,
 * processes any out-of-band buffer, then enqueues via cvp_enqueue_pkt().
 *
 * NOTE(review): the pkt_idx < 0 path jumps to exit with rc still 0, so an
 * unrecognized packet reports success to the caller -- confirm intended.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int msm_cvp_session_process_hfi(
	struct msm_cvp_inst *inst,
	struct eva_kmd_hfi_packet *in_pkt,
	unsigned int in_offset,
	unsigned int in_buf_num)
{
	int pkt_idx, rc = 0;

	unsigned int offset = 0, buf_num = 0, signal;
	struct cvp_session_queue *sq;
	struct msm_cvp_inst *s;
	struct cvp_hfi_cmd_session_hdr *pkt_hdr;
	bool is_config_pkt;

	if (!inst || !inst->core || !in_pkt) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Pin the instance for the duration of the submission. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
	if (pkt_idx < 0) {
		dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
				in_pkt->pkt_data[0],
				in_pkt->pkt_data[1]);
		goto exit;
	} else {
		/* Expected response signal and config/frame classification. */
		signal = cvp_hfi_defs[pkt_idx].resp;
		is_config_pkt = cvp_hfi_defs[pkt_idx].is_config_pkt;
	}

	if (is_config_pkt)
		pr_info(CVP_DBG_TAG "inst %pK config %s\n", "sess",
			inst, cvp_hfi_defs[pkt_idx].name);

	if (signal == HAL_NO_RESP) {
		/* Frame packets are not allowed before session starts*/
		sq = &inst->session_queue;
		spin_lock(&sq->lock);
		if ((sq->state != QUEUE_START && !is_config_pkt) ||
			(sq->state >= QUEUE_INVALID)) {
			/*
			 * A init packet is allowed in case of
			 * QUEUE_ACTIVE, QUEUE_START, QUEUE_STOP
			 * A frame packet is only allowed in case of
			 * QUEUE_START
			 */
			spin_unlock(&sq->lock);
			dprintk(CVP_ERR, "%s: invalid queue state %d\n",
				__func__, sq->state);
			rc = -EINVAL;
			goto exit;
		}
		spin_unlock(&sq->lock);
	}

	/* Only honor buffer params when both are non-zero. */
	if (in_offset && in_buf_num) {
		offset = in_offset;
		buf_num = in_buf_num;
	}
	if (!is_buf_param_valid(buf_num, offset)) {
		dprintk(CVP_ERR, "Incorrect buffer num and offset in cmd\n");
		rc = -EINVAL;
		goto exit;
	}

	rc = msm_cvp_proc_oob(inst, in_pkt);
	if (rc) {
		dprintk(CVP_ERR, "%s: failed to process OOB buffer", __func__);
		goto exit;
	}

	rc = cvp_enqueue_pkt(inst, in_pkt, offset, buf_num);
	if (rc) {
		pkt_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
		dprintk(CVP_ERR, "Failed to enqueue pkt, inst %pK "
			"pkt_type %08x ktid %llu transaction_id %u\n",
			inst, pkt_hdr->packet_type,
			pkt_hdr->client_data.kdata,
			pkt_hdr->client_data.transaction_id);
	}

exit:
	cvp_put_inst(inst);
	return rc;
}
|
|
|
|
/*
 * cvp_fence_wait() - wait_event predicate for the fence worker threads.
 * @q: fence command queue.
 * @fence: output; the command moved to the sched_list, or NULL.
 * @state: output; queue state observed under the lock.
 *
 * Returns true to wake the worker: either the queue left QUEUE_START
 * (*fence == NULL, worker should exit) or a command was moved from
 * wait_list to sched_list (*fence set).  Returns false to keep waiting.
 */
static bool cvp_fence_wait(struct cvp_fence_queue *q,
		struct cvp_fence_command **fence,
		enum queue_state *state)
{
	struct cvp_fence_command *f;

	if (!q)
		return false;

	*fence = NULL;
	mutex_lock(&q->lock);
	*state = q->state;
	if (*state != QUEUE_START) {
		/* Queue stopped: wake the worker so it can exit. */
		mutex_unlock(&q->lock);
		return true;
	}

	if (list_empty(&q->wait_list)) {
		mutex_unlock(&q->lock);
		return false;
	}

	/* Claim the head command: move it to the in-flight list. */
	f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
	list_del_init(&f->list);
	list_add_tail(&f->list, &q->sched_list);

	mutex_unlock(&q->lock);
	*fence = f;

	return true;
}
|
|
|
|
/*
 * cvp_fence_proc() - execute one fenced HFI command end-to-end.
 * @inst: session instance.
 * @fc: fence command being processed.
 * @pkt: HFI command packet to send to firmware.
 *
 * Waits on input synx fences, sends the packet, waits for the matching
 * response (by ktid) on the fence session queue, then signals output synx
 * fences with success or cancel depending on the outcome.
 *
 * Return: result of the output-synx signalling (overwrites earlier rc).
 */
static int cvp_fence_proc(struct msm_cvp_inst *inst,
			struct cvp_fence_command *fc,
			struct cvp_hfi_cmd_session_hdr *pkt)
{
	int rc = 0;
	unsigned long timeout;
	u64 ktid;
	int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
	struct cvp_hfi_device *hdev;
	struct cvp_session_queue *sq;
	u32 hfi_err = HFI_ERR_NONE;
	struct cvp_hfi_msg_session_hdr_ext hdr;
	bool clock_check = false;

	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);

	if (!inst || !inst->core)
		return -EINVAL;

	hdev = inst->core->device;
	sq = &inst->session_queue_fence;
	/* ktid identifies the response belonging to this command. */
	ktid = pkt->client_data.kdata;

	/* Block until all input fences are signalled. */
	rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_INPUT_SYNX,
			fc, &synx_state);
	if (rc) {
		/* Input fences failed: release the mapped frame buffers. */
		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
		goto exit;
	}

	rc = call_hfi_op(hdev, session_send, (void *)inst->session,
			(struct eva_kmd_hfi_packet *)pkt);
	if (rc) {
		dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
			current->comm, __func__, pkt->size, pkt->packet_type);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
		goto exit;
	}

	timeout = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
	rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
				(struct eva_kmd_hfi_packet *)&hdr);

	/* Only FD support dcvs at certain FW */
	clock_check = check_clock_required(inst,
			(struct eva_kmd_hfi_packet *)&hdr);

	hfi_err = hdr.error_type;
	if (rc) {
		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
			current->comm, __func__, rc);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
		goto exit;
	}
	if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
		/* Flush in progress: cancel output fences, not an error log. */
		dprintk(CVP_SYNX, "%s %s: cvp_wait_process_message flushed\n",
			current->comm, __func__);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
	} else if (hfi_err == HFI_ERR_SESSION_STREAM_CORRUPT) {
		/* Non-fatal stream corruption: still signal success. */
		dprintk(CVP_INFO, "%s %s: cvp_wait_process_msg non-fatal %d\n",
			current->comm, __func__, hfi_err);
		synx_state = SYNX_STATE_SIGNALED_SUCCESS;
	} else if (hfi_err != HFI_ERR_NONE) {
		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi err %d\n",
			current->comm, __func__, hfi_err);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
	}

exit:
	/* Always signal output fences so waiters are never stranded. */
	rc = inst->core->synx_ftbl->cvp_synx_ops(inst, CVP_OUTPUT_SYNX,
			fc, &synx_state);
	if (clock_check)
		cvp_check_clock(inst,
			(struct cvp_hfi_msg_session_hdr_ext *)&hdr);
	return rc;
}
|
|
|
|
/*
 * cvp_alloc_fence_data() - allocate a fence command plus its packet buffer.
 * @f: output; receives the new command on success.
 * @size: requested packet size; the buffer is at least as large as the
 *        extended session message header so responses can be staged in it.
 *
 * Return: 0 on success, -ENOMEM on allocation failure (nothing leaked).
 */
static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
{
	struct cvp_fence_command *cmd;
	int pkt_size = sizeof(struct cvp_hfi_msg_session_hdr_ext);

	cmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Grow the packet buffer to the caller's size when larger. */
	if (pkt_size < size)
		pkt_size = size;

	cmd->pkt = kzalloc(pkt_size, GFP_KERNEL);
	if (!cmd->pkt) {
		kfree(cmd);
		return -ENOMEM;
	}

	*f = cmd;
	return 0;
}
|
|
|
|
static void cvp_free_fence_data(struct cvp_fence_command *f)
|
|
{
|
|
kfree(f->pkt);
|
|
f->pkt = NULL;
|
|
kfree(f);
|
|
f = NULL;
|
|
}
|
|
|
|
/*
 * cvp_fence_thread() - worker loop that drains the fence command queue.
 * @data: the owning struct msm_cvp_inst (reference taken by the spawner,
 *        released here on exit).
 *
 * Sleeps until cvp_fence_wait() hands over a command, processes it via
 * cvp_fence_proc(), releases its synx handles and frees it, then loops.
 * Exits when the queue leaves QUEUE_START.
 *
 * Return: 0, or negative errno on invalid input / fatal processing error.
 */
static int cvp_fence_thread(void *data)
{
	int rc = 0, num_fences;
	struct msm_cvp_inst *inst;
	struct cvp_fence_queue *q;
	enum queue_state state;
	struct cvp_fence_command *f;
	struct cvp_hfi_cmd_session_hdr *pkt;
	u32 *synx;
	u64 ktid = 0;

	dprintk(CVP_SYNX, "Enter %s\n", current->comm);

	inst = (struct msm_cvp_inst *)data;
	if (!inst || !inst->core || !inst->core->device) {
		dprintk(CVP_ERR, "%s invalid inst %pK\n", current->comm, inst);
		rc = -EINVAL;
		goto exit;
	}

	q = &inst->fence_cmd_queue;

wait:
	dprintk(CVP_SYNX, "%s starts wait\n", current->comm);

	f = NULL;
	wait_event_interruptible(q->wq, cvp_fence_wait(q, &f, &state));
	if (state != QUEUE_START)
		/* Queue stopped: shut this worker down. */
		goto exit;

	if (!f)
		/* Spurious wakeup with nothing claimed: wait again. */
		goto wait;

	pkt = f->pkt;
	synx = (u32 *)f->synx;

	num_fences = f->num_fences - f->output_index;
	/*
	 * If there is output fence, go through fence path
	 * Otherwise, go through non-fenced path
	 */
	if (num_fences)
		/* Strip the fence marker bit to recover the real ktid. */
		ktid = pkt->client_data.kdata & (FENCE_BIT - 1);

	dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
		current->comm, pkt->packet_type, ktid, f->frame_id);

	rc = cvp_fence_proc(inst, f, pkt);

	mutex_lock(&q->lock);
	inst->core->synx_ftbl->cvp_release_synx(inst, f);
	/* Remove from sched_list (where cvp_fence_wait() placed it). */
	list_del_init(&f->list);
	state = q->state;
	mutex_unlock(&q->lock);

	dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
		current->comm, pkt->packet_type, ktid, f->frame_id, rc);

	cvp_free_fence_data(f);

	if (rc && state != QUEUE_START)
		goto exit;

	goto wait;

exit:
	dprintk(CVP_SYNX, "%s exit\n", current->comm);
	/* Drop the reference taken for this thread at spawn time. */
	cvp_put_inst(inst);
	return rc;
}
|
|
|
|
/*
 * msm_cvp_session_process_hfi_fence() - submit a fenced HFI command using
 * the legacy (synx v1 style) ioctl layout.
 * @inst: session instance.
 * @arg: user argument carrying the packet, fence control block and
 *       buffer offset/count.
 *
 * Validates the packet, maps its frame buffers, builds a cvp_fence_command,
 * imports the synx fences and queues the command for the fence worker
 * threads, which are woken at the end.
 *
 * NOTE(review): the idx < 0 / oversized-packet path and the
 * is_buf_param_valid() failure path jump to exit with rc still 0, so those
 * errors report success to the caller -- confirm intended.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,
		struct eva_kmd_arg *arg)
{
	int rc = 0;
	int idx;
	struct eva_kmd_hfi_fence_packet *fence_pkt;
	struct eva_kmd_hfi_synx_packet *synx_pkt;
	struct eva_kmd_fence_ctrl *kfc;
	struct cvp_hfi_cmd_session_hdr *pkt;
	unsigned int offset = 0, buf_num = 0, in_offset, in_buf_num;
	struct msm_cvp_inst *s;
	struct cvp_fence_command *f;
	struct cvp_fence_queue *q;
	u32 *fence;
	enum op_mode mode;
	bool is_config_pkt;

	if (!inst || !inst->core || !arg || !inst->core->device) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Pin the instance while we build and queue the command. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	q = &inst->fence_cmd_queue;

	mutex_lock(&q->lock);
	mode = q->mode;
	mutex_unlock(&q->lock);

	if (mode == OP_DRAINING) {
		/* Reject new fenced work while a flush is draining. */
		dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
		rc = -EBUSY;
		goto exit;
	}

	in_offset = arg->buf_offset;
	in_buf_num = arg->buf_num;

	fence_pkt = &arg->data.hfi_fence_pkt;
	pkt = (struct cvp_hfi_cmd_session_hdr *)&fence_pkt->pkt_data;
	idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)pkt);

	if (idx < 0 ||
		(pkt->size > MAX_HFI_FENCE_OFFSET * sizeof(unsigned int))) {
		dprintk(CVP_ERR, "%s incorrect packet %d %#x\n", __func__,
			pkt->size, pkt->packet_type);
		goto exit;
	} else {
		is_config_pkt = cvp_hfi_defs[idx].is_config_pkt;
	}

	/* Only honor buffer params when both are non-zero. */
	if (in_offset && in_buf_num) {
		offset = in_offset;
		buf_num = in_buf_num;
	}

	if (!is_buf_param_valid(buf_num, offset)) {
		dprintk(CVP_ERR, "Incorrect buf num and offset in cmd\n");
		goto exit;
	}

	if (is_config_pkt)
		pr_info(CVP_DBG_TAG "inst %pK config %s\n",
			"pkt", inst, cvp_hfi_defs[idx].name);

	/* Map frame buffers; assigns the transaction id into kdata. */
	rc = msm_cvp_map_frame(inst, (struct eva_kmd_hfi_packet *)pkt, offset,
				buf_num);
	if (rc)
		goto exit;

	rc = cvp_alloc_fence_data(&f, pkt->size);
	if (rc)
		goto exit;

	f->type = cvp_hfi_defs[idx].type;
	f->mode = OP_NORMAL;

	synx_pkt = &arg->data.hfi_synx_pkt;
	if (synx_pkt->fence_data[0] != 0xFEEDFACE) {
		/* Missing magic: old userspace synx layout, not supported. */
		dprintk(CVP_ERR, "%s deprecated synx path\n", __func__);
		cvp_free_fence_data(f);
		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
		goto exit;
	} else {
		kfc = &synx_pkt->fc;
		fence = (u32 *)&kfc->fences;
		f->frame_id = kfc->frame_id;
		f->signature = 0xFEEDFACE;
		f->num_fences = kfc->num_fences;
		f->output_index = kfc->output_index;
	}

	dprintk(CVP_SYNX, "%s: frameID %llu ktid %llu\n",
			__func__, f->frame_id, pkt->client_data.kdata);

	memcpy(f->pkt, pkt, pkt->size);

	/* Mark the queued copy as fenced so responses route correctly. */
	f->pkt->client_data.kdata |= FENCE_BIT;

	rc = inst->core->synx_ftbl->cvp_import_synx(inst, f, fence);
	if (rc) {
		cvp_free_fence_data(f);
		goto exit;
	}

	mutex_lock(&q->lock);
	list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
	mutex_unlock(&q->lock);

	/* Hand the command to a fence worker thread. */
	wake_up(&inst->fence_cmd_queue.wq);

exit:
	cvp_put_inst(s);
	return rc;
}
|
|
|
|
|
|
/*
 * cvp_populate_fences() - extract synx fences embedded in buffer descriptors
 * and, when any exist, queue the packet on the fenced path (synx v2).
 * @in_pkt: user HFI packet containing cvp_buf_type descriptors.
 * @offset: word offset of the first buffer descriptor in @in_pkt.
 * @num: number of buffer descriptors.
 * @inst: session instance.
 *
 * Scans the descriptors twice (inputs first, then outputs) so input fences
 * occupy indices [0, output_index) and output fences follow.  Fence handles
 * are cleared from the descriptors after harvesting so firmware never sees
 * them.
 *
 * NOTE(review): when CVP_CONFIG_SYNX_V2 is not defined the entire body,
 * including the declaration of rc, is compiled out while "return rc;"
 * remains -- presumably a v1 variant of this function exists elsewhere;
 * confirm the build configuration.
 *
 * Return: number of fences queued (> 0, fenced path), 0 (caller should send
 * directly), or negative errno.
 */
static int cvp_populate_fences( struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int num, struct msm_cvp_inst *inst)
{
#ifdef CVP_CONFIG_SYNX_V2
	u32 i, buf_offset, fence_cnt;
	struct eva_kmd_fence fences[MAX_HFI_FENCE_SIZE];
	struct cvp_fence_command *f;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_fence_queue *q;
	enum op_mode mode;
	struct cvp_buf_type *buf;

	int rc = 0;

	q = &inst->fence_cmd_queue;

	mutex_lock(&q->lock);
	mode = q->mode;
	mutex_unlock(&q->lock);

	if (mode == OP_DRAINING) {
		/* Reject new fenced work while a flush is draining. */
		dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
		rc = -EBUSY;
		goto exit;
	}

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	rc = cvp_alloc_fence_data((&f), cmd_hdr->size);
	if (rc) {
		dprintk(CVP_ERR,"%s: Failed to alloc fence data", __func__);
		goto exit;
	}

	f->type = cmd_hdr->packet_type;
	f->mode = OP_NORMAL;
	f->signature = 0xFEEDFACE;
	f->num_fences = 0;
	f->output_index = 0;
	buf_offset = offset;

	if (!cvp_kernel_fence_enabled) {
		/*
		 * Kernel fencing disabled: only count the fences so the
		 * caller can pick the fenced path; no synx import happens.
		 */
		for (i = 0; i < num; i++) {
			buf = (struct cvp_buf_type *)&in_pkt->pkt_data[buf_offset];
			buf_offset += sizeof(*buf) >> 2;

			if (buf->input_handle || buf->output_handle) {
				f->num_fences++;
				if (buf->input_handle)
					f->output_index++;
			}
		}
		/* Distinct signature marks the "counted only" variant. */
		f->signature = 0xB0BABABE;
		if (f->num_fences)
			goto fence_cmd_queue;

		goto free_exit;
	}

	/* First pass to find INPUT synx handles */
	for (i = 0; i < num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[buf_offset];
		buf_offset += sizeof(*buf) >> 2;

		if (buf->input_handle) {
			/* Check fence_type? */
			fences[f->num_fences].h_synx = buf->input_handle;
			f->num_fences++;
			/* Scrub the handle so firmware never sees it. */
			buf->fence_type &= ~INPUT_FENCE_BITMASK;
			buf->input_handle = 0;
		}
	}
	/* Input fences end here; outputs are appended after this index. */
	f->output_index = f->num_fences;

	dprintk(CVP_SYNX, "%s:Input Fence passed - Number of Fences is %d\n",
			__func__, f->num_fences);

	/*
	 * Second pass to find OUTPUT synx handle
	 * If no of fences is 0 dont execute the below portion until line 911, return 0
	 */
	buf_offset = offset;
	for (i = 0; i < num; i++) {
		buf = (struct cvp_buf_type*)&in_pkt->pkt_data[buf_offset];
		buf_offset += sizeof(*buf) >> 2;

		if (buf->output_handle) {
			/* Check fence_type? */
			fences[f->num_fences].h_synx = buf->output_handle;
			f->num_fences++;
			buf->fence_type &= ~OUTPUT_FENCE_BITMASK;
			buf->output_handle = 0;
		}
	}
	dprintk(CVP_SYNX, "%s:Output Fence passed - Number of Fences is %d\n",
			__func__, f->num_fences);

	if (f->num_fences == 0)
		/* No fences at all: caller sends the packet directly. */
		goto free_exit;

	rc = inst->core->synx_ftbl->cvp_import_synx(inst, f, (u32*)fences);
	if (rc) {
		dprintk(CVP_ERR,"%s: Failed to import fences", __func__);
		goto free_exit;
	}

fence_cmd_queue:
	fence_cnt = f->num_fences;
	memcpy(f->pkt, cmd_hdr, cmd_hdr->size);
	/* Mark the queued copy as fenced so responses route correctly. */
	f->pkt->client_data.kdata |= FENCE_BIT;

	mutex_lock(&q->lock);
	list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
	mutex_unlock(&q->lock);

	wake_up(&inst->fence_cmd_queue.wq);

	/* Positive return tells cvp_enqueue_pkt() the fenced path was taken. */
	return fence_cnt;

free_exit:
	cvp_free_fence_data(f);
exit:
#endif /* CVP_CONFIG_SYNX_V2 */
	return rc;
}
|
|
|
|
|
|
/*
 * cvp_enqueue_pkt() - map the packet's buffers and send it to firmware,
 * or hand it to the fenced path when it carries synx fences.
 * @inst: session instance (validated by the caller).
 * @in_pkt: user HFI packet.
 * @in_offset: word offset of the buffer descriptors.
 * @in_buf_num: number of buffer descriptors.
 *
 * Return: 0 on success (direct send or fenced hand-off), negative errno
 * on mapping, fence-population or send failure.
 */
static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
	struct eva_kmd_hfi_packet *in_pkt,
	unsigned int in_offset,
	unsigned int in_buf_num)
{
	struct cvp_hfi_device *hdev;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	int pkt_type, rc = 0;
	enum buf_map_type map_type;

	hdev = inst->core->device;

	pkt_type = in_pkt->pkt_data[1];
	map_type = cvp_find_map_type(pkt_type);

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	/* The kdata will be overriden by transaction ID if the cmd has buf */
	cmd_hdr->client_data.kdata = 0;

	/* Map/mark buffers according to the packet's mapping class. */
	if (map_type == MAP_PERSIST)
		rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num);
	else if (map_type == UNMAP_PERSIST)
		rc = msm_cvp_mark_user_persist(inst, in_pkt, in_offset, in_buf_num);
	else
		rc = msm_cvp_map_frame(inst, in_pkt, in_offset, in_buf_num);

	if (rc)
		return rc;

	/* > 0: fenced path took ownership; 0: send directly; < 0: error. */
	rc = cvp_populate_fences(in_pkt, in_offset, in_buf_num, inst);
	if (rc == 0) {
		rc = call_hfi_op(hdev, session_send, (void*)inst->session,
			in_pkt);
		if (rc) {
			dprintk(CVP_ERR,"%s: Failed in call_hfi_op %d, %x\n",
				__func__, in_pkt->pkt_data[0],
				in_pkt->pkt_data[1]);
			/* Undo the frame mapping done above on send failure. */
			if (map_type == MAP_FRAME)
				msm_cvp_unmap_frame(inst,
					cmd_hdr->client_data.kdata);
		}
	} else if (rc > 0) {
		dprintk(CVP_SYNX, "Going fenced path\n");
		rc = 0;
	} else {
		dprintk(CVP_ERR,"%s: Failed to populate fences\n",
			__func__);
		if (map_type == MAP_FRAME)
			msm_cvp_unmap_frame(inst, cmd_hdr->client_data.kdata);
	}

	return rc;
}
|
|
|
|
static inline int div_by_1dot5(unsigned int a)
|
|
{
|
|
unsigned long i = a << 1;
|
|
|
|
return (unsigned int) i/3;
|
|
}
|
|
|
|
/*
 * msm_cvp_session_delete() - session-delete control hook.
 * @inst: session instance (unused; teardown happens elsewhere).
 *
 * Return: always 0.
 */
int msm_cvp_session_delete(struct msm_cvp_inst *inst)
{
	(void)inst;
	return 0;
}
|
|
|
|
/*
 * msm_cvp_session_create() - move the instance to OPEN_DONE, allocate ARP
 * buffers, initialize synx and activate the session queue.
 * @inst: session instance.
 *
 * NOTE(review): in "state != MSM_CVP_CORE_INIT_DONE || state >
 * MSM_CVP_OPEN_DONE" the second arm can never trigger when the first is
 * false -- looks redundant; confirm the intended state window.
 *
 * Return: 0 on success, -EINVAL/-ECONNRESET on bad state, or the error
 * from state transition / ARP buffer setup.
 */
int msm_cvp_session_create(struct msm_cvp_inst *inst)
{
	int rc = 0;
	struct cvp_session_queue *sq;

	if (!inst || !inst->core)
		return -EINVAL;

	if (inst->state >= MSM_CVP_CLOSE_DONE)
		return -ECONNRESET;

	if (inst->state != MSM_CVP_CORE_INIT_DONE ||
		inst->state > MSM_CVP_OPEN_DONE) {
		dprintk(CVP_ERR,
			"%s Incorrect CVP state %d to create session\n",
			__func__, inst->state);
		return -EINVAL;
	}

	rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to move instance to open done state\n");
		goto fail_init;
	}

	rc = cvp_comm_set_arp_buffers(inst);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to set ARP buffers\n");
		goto fail_init;
	}

	inst->core->synx_ftbl->cvp_sess_init_synx(inst);
	/* Session queue accepts config packets from now on. */
	sq = &inst->session_queue;
	spin_lock(&sq->lock);
	sq->state = QUEUE_ACTIVE;
	spin_unlock(&sq->lock);

fail_init:
	return rc;
}
|
|
|
|
static int session_state_check_init(struct msm_cvp_inst *inst)
|
|
{
|
|
mutex_lock(&inst->lock);
|
|
if (inst->state == MSM_CVP_OPEN || inst->state == MSM_CVP_OPEN_DONE) {
|
|
mutex_unlock(&inst->lock);
|
|
return 0;
|
|
}
|
|
mutex_unlock(&inst->lock);
|
|
|
|
return msm_cvp_session_create(inst);
|
|
}
|
|
|
|
static int cvp_fence_thread_start(struct msm_cvp_inst *inst)
|
|
{
|
|
u32 tnum = 0;
|
|
u32 i = 0;
|
|
int rc = 0;
|
|
char tname[16];
|
|
struct task_struct *thread;
|
|
struct cvp_fence_queue *q;
|
|
struct cvp_session_queue *sq;
|
|
|
|
if (!inst->prop.fthread_nr)
|
|
return 0;
|
|
|
|
q = &inst->fence_cmd_queue;
|
|
mutex_lock(&q->lock);
|
|
q->state = QUEUE_START;
|
|
mutex_unlock(&q->lock);
|
|
|
|
for (i = 0; i < inst->prop.fthread_nr; ++i) {
|
|
if (!cvp_get_inst_validate(inst->core, inst)) {
|
|
rc = -ECONNRESET;
|
|
goto exit;
|
|
}
|
|
|
|
snprintf(tname, sizeof(tname), "fthread_%d", tnum++);
|
|
thread = kthread_run(cvp_fence_thread, inst, tname);
|
|
if (!thread) {
|
|
dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
|
|
rc = -ECHILD;
|
|
goto exit;
|
|
}
|
|
}
|
|
|
|
sq = &inst->session_queue_fence;
|
|
spin_lock(&sq->lock);
|
|
sq->state = QUEUE_START;
|
|
spin_unlock(&sq->lock);
|
|
|
|
exit:
|
|
if (rc) {
|
|
mutex_lock(&q->lock);
|
|
q->state = QUEUE_STOP;
|
|
mutex_unlock(&q->lock);
|
|
wake_up_all(&q->wq);
|
|
}
|
|
return rc;
|
|
}
|
|
|
|
/*
 * cvp_fence_thread_stop() - stop the fence worker threads.
 * @inst: session instance.
 *
 * Marks both the fence command queue and the fence session queue as
 * stopped, then wakes all waiters so the workers observe the new state
 * and exit (releasing their instance references).
 *
 * Return: always 0.
 */
static int cvp_fence_thread_stop(struct msm_cvp_inst *inst)
{
	struct cvp_fence_queue *q;
	struct cvp_session_queue *sq;

	/* Nothing to stop when no workers were ever configured. */
	if (!inst->prop.fthread_nr)
		return 0;

	q = &inst->fence_cmd_queue;

	mutex_lock(&q->lock);
	q->state = QUEUE_STOP;
	mutex_unlock(&q->lock);

	sq = &inst->session_queue_fence;
	spin_lock(&sq->lock);
	sq->state = QUEUE_STOP;
	spin_unlock(&sq->lock);

	/* Wake workers blocked in cvp_fence_wait() and message waiters. */
	wake_up_all(&q->wq);
	wake_up_all(&sq->wq);

	return 0;
}
|
|
|
|
/*
 * msm_cvp_session_start() - start a session: open the queue, spawn fence
 * workers and issue SESSION_START to firmware.
 * @inst: session instance.
 * @arg: ioctl argument (unused here beyond validation by the caller).
 *
 * On any failure the fence workers are stopped and the session queue
 * state is restored to its previous value.
 *
 * Return: 0 on success, negative errno on failure.
 */
int msm_cvp_session_start(struct msm_cvp_inst *inst,
		struct eva_kmd_arg *arg)
{
	struct cvp_session_queue *sq;
	struct cvp_hfi_device *hdev;
	int rc;
	enum queue_state old_state;

	if (!inst || !inst->core) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	sq = &inst->session_queue;
	spin_lock(&sq->lock);
	if (sq->msg_count) {
		/* Stale responses from a previous run must be drained first. */
		dprintk(CVP_ERR, "session start failed queue not empty%d\n",
			sq->msg_count);
		spin_unlock(&sq->lock);
		rc = -EINVAL;
		goto exit;
	}
	/* Remember the old state so failure paths can restore it. */
	old_state = sq->state;
	sq->state = QUEUE_START;
	spin_unlock(&sq->lock);

	hdev = inst->core->device;
	if (inst->prop.type == HFI_SESSION_FD
		|| inst->prop.type == HFI_SESSION_DMM) {
		/* FD/DMM sessions vote PM QoS off for latency. */
		spin_lock(&inst->core->resources.pm_qos.lock);
		inst->core->resources.pm_qos.off_vote_cnt++;
		spin_unlock(&inst->core->resources.pm_qos.lock);
		call_hfi_op(hdev, pm_qos_update, hdev->hfi_device_data);
	}
	/*
	 * cvp_fence_thread_start will increment reference to instance.
	 * It guarantees the EVA session won't be deleted. Use of session
	 * functions, such as session_start requires the session to be valid.
	 */
	rc = cvp_fence_thread_start(inst);
	if (rc)
		goto restore_state;

	/* Send SESSION_START command */
	rc = call_hfi_op(hdev, session_start, (void *)inst->session);
	if (rc) {
		dprintk(CVP_WARN, "%s: session start failed rc %d\n",
				__func__, rc);
		goto stop_thread;
	}

	/* Wait for FW response */
	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_START_DONE);
	if (rc) {
		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
				__func__, rc);
		goto stop_thread;
	}

	dprintk(CVP_SESS, "session %llx (%#x) started\n", inst, hash32_ptr(inst->session));

	return 0;

stop_thread:
	cvp_fence_thread_stop(inst);
restore_state:
	spin_lock(&sq->lock);
	sq->state = old_state;
	spin_unlock(&sq->lock);
exit:
	return rc;
}
|
|
|
|
/*
 * msm_cvp_session_stop() - stop a session: close the queue, issue
 * SESSION_STOP to firmware and shut down the fence workers.
 * @inst: session instance.
 * @arg: optional ioctl argument; when present, ctrl_data[0] reports the
 *       number of undrained messages on -EUCLEAN.
 *
 * Return: 0 on success; -EUCLEAN when responses are still pending;
 * negative errno on firmware/validation failure.
 */
int msm_cvp_session_stop(struct msm_cvp_inst *inst,
		struct eva_kmd_arg *arg)
{
	struct cvp_session_queue *sq;
	struct eva_kmd_session_control *sc = NULL;
	struct msm_cvp_inst *s;
	struct cvp_hfi_device *hdev;
	int rc;

	if (!inst || !inst->core) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (arg)
		sc = &arg->data.session_ctrl;

	/* Pin the instance for the duration of the stop sequence. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	sq = &inst->session_queue;

	spin_lock(&sq->lock);
	if (sq->msg_count) {
		/* Userspace must drain pending responses before stopping. */
		dprintk(CVP_ERR, "session stop incorrect: queue not empty%d\n",
			sq->msg_count);
		if (sc)
			sc->ctrl_data[0] = sq->msg_count;
		spin_unlock(&sq->lock);
		rc = -EUCLEAN;
		goto exit;
	}
	sq->state = QUEUE_STOP;

	dprintk(CVP_SESS, "Stop session: %pK session_id = %d\n",
			inst, hash32_ptr(inst->session));
	spin_unlock(&sq->lock);

	hdev = inst->core->device;
	/* Send SESSION_STOP command */
	rc = call_hfi_op(hdev, session_stop, (void *)inst->session);
	if (rc) {
		dprintk(CVP_WARN, "%s: session stop failed rc %d\n",
				__func__, rc);
		goto stop_thread;
	}

	/* Wait for FW response */
	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_STOP_DONE);
	if (rc) {
		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
				__func__, rc);
		goto stop_thread;
	}

stop_thread:
	/* Wake any waiters on the (now stopped) session queue. */
	wake_up_all(&inst->session_queue.wq);

	cvp_fence_thread_stop(inst);
exit:
	cvp_put_inst(s);
	return rc;
}
|
|
|
|
int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst)
|
|
{
|
|
struct cvp_session_queue *sq;
|
|
|
|
sq = &inst->session_queue;
|
|
|
|
spin_lock(&sq->lock);
|
|
|
|
if (sq->state == QUEUE_STOP) {
|
|
spin_unlock(&sq->lock);
|
|
return 0;
|
|
}
|
|
|
|
sq->state = QUEUE_STOP;
|
|
|
|
dprintk(CVP_SESS, "Stop session queue: %pK session_id = %d\n",
|
|
inst, hash32_ptr(inst->session));
|
|
spin_unlock(&sq->lock);
|
|
|
|
wake_up_all(&inst->session_queue.wq);
|
|
|
|
return cvp_fence_thread_stop(inst);
|
|
}
|
|
|
|
static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
|
|
struct eva_kmd_arg *arg)
|
|
{
|
|
struct eva_kmd_session_control *ctrl = &arg->data.session_ctrl;
|
|
int rc = 0;
|
|
unsigned int ctrl_type;
|
|
|
|
ctrl_type = ctrl->ctrl_type;
|
|
|
|
if (!inst && ctrl_type != SESSION_CREATE) {
|
|
dprintk(CVP_ERR, "%s invalid session\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
switch (ctrl_type) {
|
|
case SESSION_STOP:
|
|
rc = msm_cvp_session_stop(inst, arg);
|
|
break;
|
|
case SESSION_START:
|
|
rc = msm_cvp_session_start(inst, arg);
|
|
break;
|
|
case SESSION_CREATE:
|
|
rc = msm_cvp_session_create(inst);
|
|
break;
|
|
case SESSION_DELETE:
|
|
rc = msm_cvp_session_delete(inst);
|
|
break;
|
|
case SESSION_INFO:
|
|
default:
|
|
dprintk(CVP_ERR, "%s Unsupported session ctrl%d\n",
|
|
__func__, ctrl->ctrl_type);
|
|
rc = -EINVAL;
|
|
}
|
|
return rc;
|
|
}
|
|
|
|
static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
|
|
struct eva_kmd_arg *arg)
|
|
{
|
|
struct eva_kmd_sys_properties *props = &arg->data.sys_properties;
|
|
struct cvp_hfi_device *hdev;
|
|
struct iris_hfi_device *hfi;
|
|
struct cvp_session_prop *session_prop;
|
|
int i, rc = 0;
|
|
|
|
if (!inst || !inst->core || !inst->core->device) {
|
|
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
hdev = inst->core->device;
|
|
hfi = hdev->hfi_device_data;
|
|
|
|
if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
|
|
dprintk(CVP_ERR, "Too many properties %d to get\n",
|
|
props->prop_num);
|
|
return -E2BIG;
|
|
}
|
|
|
|
session_prop = &inst->prop;
|
|
|
|
for (i = 0; i < props->prop_num; i++) {
|
|
switch (props->prop_data[i].prop_type) {
|
|
case EVA_KMD_PROP_HFI_VERSION:
|
|
{
|
|
props->prop_data[i].data = hfi->version;
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_SESSION_DUMPOFFSET:
|
|
{
|
|
props->prop_data[i].data =
|
|
session_prop->dump_offset;
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_SESSION_DUMPSIZE:
|
|
{
|
|
props->prop_data[i].data =
|
|
session_prop->dump_size;
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_FDU:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_FDU);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_ICA:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_ICA);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_OD:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_OD);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_MPU:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_MPU);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_VADL:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_VADL);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_TOF:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_TOF);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_RGE:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_RGE);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_XRA:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_XRA);
|
|
break;
|
|
}
|
|
case EVA_KMD_PROP_PWR_LSR:
|
|
{
|
|
props->prop_data[i].data =
|
|
msm_cvp_get_hw_aggregate_cycles(HFI_HW_LSR);
|
|
break;
|
|
}
|
|
default:
|
|
dprintk(CVP_ERR, "unrecognized sys property %d\n",
|
|
props->prop_data[i].prop_type);
|
|
rc = -EFAULT;
|
|
}
|
|
}
|
|
return rc;
|
|
}
|
|
|
|
/*
 * msm_cvp_set_sysprop() - apply a batch of session properties from userspace.
 *
 * @inst: session instance whose cached properties are updated.
 * @arg:  KMD argument carrying an eva_kmd_sys_properties array.
 *
 * Values are only stored into inst->prop here; they take effect when the
 * driver next votes power/clocks or builds session packets from the cache.
 *
 * Return: 0 on success, -EINVAL for a NULL instance, -E2BIG when the batch
 * exceeds MAX_KMD_PROP_NUM_PER_PACKET, or -EFAULT if any property type is
 * unrecognized. Note: on -EFAULT, properties earlier in the same batch have
 * already been applied and are not rolled back.
 */
static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
		struct eva_kmd_arg *arg)
{
	struct eva_kmd_sys_properties *props = &arg->data.sys_properties;
	struct eva_kmd_sys_property *prop_array;
	struct cvp_session_prop *session_prop;
	int i, rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Reject batches larger than a single packet can legitimately carry */
	if (props->prop_num > MAX_KMD_PROP_NUM_PER_PACKET) {
		dprintk(CVP_ERR, "Too many properties %d to set\n",
			props->prop_num);
		return -E2BIG;
	}

	prop_array = &arg->data.sys_properties.prop_data[0];
	session_prop = &inst->prop;

	for (i = 0; i < props->prop_num; i++) {
		switch (prop_array[i].prop_type) {
		/* Session identity / access attributes */
		case EVA_KMD_PROP_SESSION_TYPE:
			session_prop->type = prop_array[i].data;
			break;
		case EVA_KMD_PROP_SESSION_KERNELMASK:
			session_prop->kernel_mask = prop_array[i].data;
			break;
		case EVA_KMD_PROP_SESSION_PRIORITY:
			session_prop->priority = prop_array[i].data;
			break;
		case EVA_KMD_PROP_SESSION_SECURITY:
			session_prop->is_secure = prop_array[i].data;
			break;
		case EVA_KMD_PROP_SESSION_DSPMASK:
			session_prop->dsp_mask = prop_array[i].data;
			break;
		/*
		 * Per-HW-block cycle votes. ICA and FW values are scaled down
		 * by 1.5 (div_by_1dot5) before caching; the other blocks are
		 * stored as supplied.
		 */
		case EVA_KMD_PROP_PWR_FDU:
			session_prop->cycles[HFI_HW_FDU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_ICA:
			session_prop->cycles[HFI_HW_ICA] =
				div_by_1dot5(prop_array[i].data);
			break;
		case EVA_KMD_PROP_PWR_OD:
			session_prop->cycles[HFI_HW_OD] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_MPU:
			session_prop->cycles[HFI_HW_MPU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_VADL:
			session_prop->cycles[HFI_HW_VADL] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_TOF:
			session_prop->cycles[HFI_HW_TOF] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_RGE:
			session_prop->cycles[HFI_HW_RGE] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_XRA:
			session_prop->cycles[HFI_HW_XRA] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_LSR:
			session_prop->cycles[HFI_HW_LSR] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FW:
			session_prop->fw_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		/* DDR bandwidth and system-cache bandwidth votes */
		case EVA_KMD_PROP_PWR_DDR:
			session_prop->ddr_bw = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_SYSCACHE:
			session_prop->ddr_cache = prop_array[i].data;
			break;
		/* Operating-point (turbo) cycle votes, mirroring the above */
		case EVA_KMD_PROP_PWR_FDU_OP:
			session_prop->op_cycles[HFI_HW_FDU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_ICA_OP:
			session_prop->op_cycles[HFI_HW_ICA] =
				div_by_1dot5(prop_array[i].data);
			break;
		case EVA_KMD_PROP_PWR_OD_OP:
			session_prop->op_cycles[HFI_HW_OD] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_MPU_OP:
			session_prop->op_cycles[HFI_HW_MPU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_VADL_OP:
			session_prop->op_cycles[HFI_HW_VADL] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_TOF_OP:
			session_prop->op_cycles[HFI_HW_TOF] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_RGE_OP:
			session_prop->op_cycles[HFI_HW_RGE] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_XRA_OP:
			session_prop->op_cycles[HFI_HW_XRA] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_LSR_OP:
			session_prop->op_cycles[HFI_HW_LSR] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FW_OP:
			session_prop->fw_op_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		case EVA_KMD_PROP_PWR_DDR_OP:
			session_prop->ddr_op_bw = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_SYSCACHE_OP:
			session_prop->ddr_op_cache = prop_array[i].data;
			break;
		/* Per-HW-block frame rates used for load calculation */
		case EVA_KMD_PROP_PWR_FPS_FDU:
			session_prop->fps[HFI_HW_FDU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_MPU:
			session_prop->fps[HFI_HW_MPU] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_OD:
			session_prop->fps[HFI_HW_OD] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_ICA:
			session_prop->fps[HFI_HW_ICA] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_VADL:
			session_prop->fps[HFI_HW_VADL] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_TOF:
			session_prop->fps[HFI_HW_TOF] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_RGE:
			session_prop->fps[HFI_HW_RGE] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_XRA:
			session_prop->fps[HFI_HW_XRA] = prop_array[i].data;
			break;
		case EVA_KMD_PROP_PWR_FPS_LSR:
			session_prop->fps[HFI_HW_LSR] = prop_array[i].data;
			break;
		/* Debug-dump window (read back via msm_cvp_get_sysprop) */
		case EVA_KMD_PROP_SESSION_DUMPOFFSET:
			session_prop->dump_offset = prop_array[i].data;
			break;
		case EVA_KMD_PROP_SESSION_DUMPSIZE:
			session_prop->dump_size = prop_array[i].data;
			break;
		default:
			dprintk(CVP_ERR,
				"unrecognized sys property to set %d\n",
				prop_array[i].prop_type);
			rc = -EFAULT;
		}
	}
	return rc;
}
|
|
|
|
static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
|
|
{
|
|
unsigned long wait_time;
|
|
struct cvp_fence_queue *q;
|
|
struct cvp_fence_command *f;
|
|
int rc = 0;
|
|
int count = 0, max_count = 0;
|
|
u64 ktid;
|
|
|
|
q = &inst->fence_cmd_queue;
|
|
|
|
if (!q)
|
|
return -EINVAL;
|
|
|
|
f = list_first_entry(&q->sched_list,
|
|
struct cvp_fence_command,
|
|
list);
|
|
if (!f)
|
|
return rc;
|
|
|
|
mutex_lock(&q->lock);
|
|
list_for_each_entry(f, &q->sched_list, list) {
|
|
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
|
|
dprintk(CVP_SYNX, "%s: frame %llu %llu is in sched_list\n",
|
|
__func__, ktid, f->frame_id);
|
|
++count;
|
|
}
|
|
mutex_unlock(&q->lock);
|
|
wait_time = count * CVP_MAX_WAIT_TIME * 1000;
|
|
|
|
dprintk(CVP_SYNX, "%s: wait %d us for %d fence command\n",
|
|
__func__, wait_time, count);
|
|
|
|
count = 0;
|
|
max_count = wait_time / 100;
|
|
|
|
retry:
|
|
mutex_lock(&q->lock);
|
|
if (list_empty(&q->sched_list)) {
|
|
mutex_unlock(&q->lock);
|
|
return rc;
|
|
}
|
|
|
|
mutex_unlock(&q->lock);
|
|
usleep_range(100, 200);
|
|
++count;
|
|
if (count < max_count) {
|
|
goto retry;
|
|
} else {
|
|
rc = -ETIMEDOUT;
|
|
dprintk(CVP_ERR, "%s: timed out!\n", __func__);
|
|
}
|
|
|
|
return rc;
|
|
}
|
|
|
|
static void cvp_clean_fence_queue(struct msm_cvp_inst *inst, int synx_state)
|
|
{
|
|
struct cvp_fence_queue *q;
|
|
struct cvp_fence_command *f, *d;
|
|
u64 ktid;
|
|
|
|
q = &inst->fence_cmd_queue;
|
|
|
|
if (!q)
|
|
return;
|
|
|
|
mutex_lock(&q->lock);
|
|
q->mode = OP_DRAINING;
|
|
|
|
f = list_first_entry(&q->wait_list,
|
|
struct cvp_fence_command,
|
|
list);
|
|
if (!f)
|
|
goto check_sched;
|
|
|
|
list_for_each_entry_safe(f, d, &q->wait_list, list) {
|
|
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
|
|
|
|
dprintk(CVP_SYNX, "%s: (%#x) flush frame %llu %llu wait_list\n",
|
|
__func__, hash32_ptr(inst->session), ktid, f->frame_id);
|
|
|
|
list_del_init(&f->list);
|
|
msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
|
|
inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_OUTPUT_SYNX,
|
|
f, synx_state);
|
|
inst->core->synx_ftbl->cvp_release_synx(inst, f);
|
|
cvp_free_fence_data(f);
|
|
}
|
|
|
|
check_sched:
|
|
f = list_first_entry(&q->sched_list,
|
|
struct cvp_fence_command,
|
|
list);
|
|
if (!f) {
|
|
mutex_unlock(&q->lock);
|
|
return;
|
|
}
|
|
|
|
list_for_each_entry(f, &q->sched_list, list) {
|
|
ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
|
|
|
|
dprintk(CVP_SYNX, "%s: (%#x)flush frame %llu %llu sched_list\n",
|
|
__func__, hash32_ptr(inst->session), ktid, f->frame_id);
|
|
inst->core->synx_ftbl->cvp_cancel_synx(inst, CVP_INPUT_SYNX,
|
|
f, synx_state);
|
|
}
|
|
|
|
mutex_unlock(&q->lock);
|
|
}
|
|
|
|
int cvp_clean_session_queues(struct msm_cvp_inst *inst)
|
|
{
|
|
struct cvp_fence_queue *q;
|
|
struct cvp_session_queue *sq;
|
|
u32 count = 0, max_retries = 100;
|
|
|
|
q = &inst->fence_cmd_queue;
|
|
mutex_lock(&q->lock);
|
|
if (q->state == QUEUE_START) {
|
|
mutex_unlock(&q->lock);
|
|
cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
|
|
} else {
|
|
dprintk(CVP_WARN, "Incorrect fence cmd queue state %d\n",
|
|
q->state);
|
|
mutex_unlock(&q->lock);
|
|
}
|
|
|
|
cvp_fence_thread_stop(inst);
|
|
|
|
/* Waiting for all output synx sent */
|
|
retry:
|
|
mutex_lock(&q->lock);
|
|
if (list_empty(&q->sched_list)) {
|
|
mutex_unlock(&q->lock);
|
|
return 0;
|
|
}
|
|
mutex_unlock(&q->lock);
|
|
usleep_range(500, 1000);
|
|
if (++count > max_retries)
|
|
return -EBUSY;
|
|
|
|
goto retry;
|
|
|
|
sq = &inst->session_queue_fence;
|
|
spin_lock(&sq->lock);
|
|
sq->state = QUEUE_INVALID;
|
|
spin_unlock(&sq->lock);
|
|
}
|
|
|
|
static int cvp_flush_all(struct msm_cvp_inst *inst)
|
|
{
|
|
int rc = 0;
|
|
struct msm_cvp_inst *s;
|
|
struct cvp_fence_queue *q;
|
|
struct cvp_hfi_device *hdev;
|
|
|
|
if (!inst || !inst->core) {
|
|
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
s = cvp_get_inst_validate(inst->core, inst);
|
|
if (!s)
|
|
return -ECONNRESET;
|
|
|
|
dprintk(CVP_SESS, "session %llx (%#x)flush all starts\n",
|
|
inst, hash32_ptr(inst->session));
|
|
q = &inst->fence_cmd_queue;
|
|
hdev = inst->core->device;
|
|
|
|
cvp_clean_fence_queue(inst, SYNX_STATE_SIGNALED_CANCEL);
|
|
|
|
dprintk(CVP_SESS, "%s: (%#x) send flush to fw\n",
|
|
__func__, hash32_ptr(inst->session));
|
|
|
|
/* Send flush to FW */
|
|
rc = call_hfi_op(hdev, session_flush, (void *)inst->session);
|
|
if (rc) {
|
|
dprintk(CVP_WARN, "%s: continue flush without fw. rc %d\n",
|
|
__func__, rc);
|
|
goto exit;
|
|
}
|
|
|
|
/* Wait for FW response */
|
|
rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_FLUSH_DONE);
|
|
if (rc)
|
|
dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
|
|
__func__, rc);
|
|
|
|
dprintk(CVP_SESS, "%s: (%#x) received flush from fw\n",
|
|
__func__, hash32_ptr(inst->session));
|
|
|
|
exit:
|
|
rc = cvp_drain_fence_sched_list(inst);
|
|
|
|
mutex_lock(&q->lock);
|
|
q->mode = OP_NORMAL;
|
|
mutex_unlock(&q->lock);
|
|
|
|
cvp_put_inst(s);
|
|
return rc;
|
|
}
|
|
|
|
int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct eva_kmd_arg *arg)
|
|
{
|
|
int rc = 0;
|
|
|
|
if (!inst || !arg) {
|
|
dprintk(CVP_ERR, "%s: invalid args\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
dprintk(CVP_HFI, "%s: arg->type = %x", __func__, arg->type);
|
|
|
|
if (arg->type != EVA_KMD_SESSION_CONTROL &&
|
|
arg->type != EVA_KMD_SET_SYS_PROPERTY &&
|
|
arg->type != EVA_KMD_GET_SYS_PROPERTY) {
|
|
|
|
rc = session_state_check_init(inst);
|
|
if (rc) {
|
|
dprintk(CVP_ERR,
|
|
"Incorrect session state %d for command %#x",
|
|
inst->state, arg->type);
|
|
return rc;
|
|
}
|
|
}
|
|
|
|
switch (arg->type) {
|
|
case EVA_KMD_GET_SESSION_INFO:
|
|
{
|
|
struct eva_kmd_session_info *session =
|
|
(struct eva_kmd_session_info *)&arg->data.session;
|
|
|
|
rc = msm_cvp_get_session_info(inst, &session->session_id);
|
|
break;
|
|
}
|
|
case EVA_KMD_UPDATE_POWER:
|
|
{
|
|
rc = msm_cvp_update_power(inst);
|
|
break;
|
|
}
|
|
case EVA_KMD_REGISTER_BUFFER:
|
|
{
|
|
struct eva_kmd_buffer *buf =
|
|
(struct eva_kmd_buffer *)&arg->data.regbuf;
|
|
|
|
rc = msm_cvp_register_buffer(inst, buf);
|
|
break;
|
|
}
|
|
case EVA_KMD_UNREGISTER_BUFFER:
|
|
{
|
|
struct eva_kmd_buffer *buf =
|
|
(struct eva_kmd_buffer *)&arg->data.unregbuf;
|
|
|
|
rc = msm_cvp_unregister_buffer(inst, buf);
|
|
break;
|
|
}
|
|
case EVA_KMD_RECEIVE_MSG_PKT:
|
|
{
|
|
struct eva_kmd_hfi_packet *out_pkt =
|
|
(struct eva_kmd_hfi_packet *)&arg->data.hfi_pkt;
|
|
rc = msm_cvp_session_receive_hfi(inst, out_pkt);
|
|
break;
|
|
}
|
|
case EVA_KMD_SEND_CMD_PKT:
|
|
{
|
|
struct eva_kmd_hfi_packet *in_pkt =
|
|
(struct eva_kmd_hfi_packet *)&arg->data.hfi_pkt;
|
|
|
|
rc = msm_cvp_session_process_hfi(inst, in_pkt,
|
|
arg->buf_offset, arg->buf_num);
|
|
break;
|
|
}
|
|
case EVA_KMD_SEND_FENCE_CMD_PKT:
|
|
{
|
|
rc = msm_cvp_session_process_hfi_fence(inst, arg);
|
|
break;
|
|
}
|
|
case EVA_KMD_SESSION_CONTROL:
|
|
rc = msm_cvp_session_ctrl(inst, arg);
|
|
break;
|
|
case EVA_KMD_GET_SYS_PROPERTY:
|
|
rc = msm_cvp_get_sysprop(inst, arg);
|
|
break;
|
|
case EVA_KMD_SET_SYS_PROPERTY:
|
|
rc = msm_cvp_set_sysprop(inst, arg);
|
|
break;
|
|
case EVA_KMD_FLUSH_ALL:
|
|
rc = cvp_flush_all(inst);
|
|
break;
|
|
case EVA_KMD_FLUSH_FRAME:
|
|
dprintk(CVP_WARN, "EVA_KMD_FLUSH_FRAME IOCTL deprecated\n");
|
|
rc = 0;
|
|
break;
|
|
default:
|
|
dprintk(CVP_HFI, "%s: unknown arg type %#x\n",
|
|
__func__, arg->type);
|
|
rc = -ENOTSUPP;
|
|
break;
|
|
}
|
|
|
|
return rc;
|
|
}
|
|
|
|
int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
|
|
{
|
|
int rc = 0;
|
|
struct cvp_hal_session *session;
|
|
|
|
if (!inst || !inst->core) {
|
|
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
|
|
inst, hash32_ptr(inst->session));
|
|
|
|
session = (struct cvp_hal_session *)inst->session;
|
|
if (!session)
|
|
return rc;
|
|
|
|
rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
|
|
if (rc)
|
|
dprintk(CVP_ERR, "%s: close failed\n", __func__);
|
|
|
|
rc = msm_cvp_session_deinit_buffers(inst);
|
|
return rc;
|
|
}
|
|
|
|
int msm_cvp_session_init(struct msm_cvp_inst *inst)
|
|
{
|
|
int rc = 0;
|
|
|
|
if (!inst) {
|
|
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
|
|
inst, hash32_ptr(inst->session));
|
|
|
|
/* set default frequency */
|
|
inst->clk_data.core_id = 0;
|
|
inst->clk_data.min_freq = 1000;
|
|
inst->clk_data.ddr_bw = 1000;
|
|
inst->clk_data.sys_cache_bw = 1000;
|
|
|
|
inst->prop.type = 1;
|
|
inst->prop.kernel_mask = 0xFFFFFFFF;
|
|
inst->prop.priority = 0;
|
|
inst->prop.is_secure = 0;
|
|
inst->prop.dsp_mask = 0;
|
|
inst->prop.fthread_nr = 3;
|
|
|
|
return rc;
|
|
}
|