video-driver: Move HFI queue functions out of venus_hfi

Split the HFI queue functions into a separate file. This slims down
venus_hfi and also makes it possible to reuse/share the queue code
with other drivers.

Change-Id: I16155e649f216afc6f26db76b565aad28b114cc7
Signed-off-by: Stanimir Varbanov <quic_c_svarba@quicinc.com>
Author: Stanimir Varbanov
Date: 2022-08-12 09:20:00 -07:00
Committed by: Ashish Patil
Parent: e77d578972
Commit: 1020319ca4
8 changed files with 773 additions and 689 deletions
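To make the split concrete: after this change the power handling stays in venus_hfi, while all ring-buffer access goes through the venus_hfi_queue_* API declared in venus_hfi_queue.h. Below is a minimal sketch of the resulting command path, mirroring the __cmdq_write() wrapper added in this patch (send_packet is an illustrative caller name, not part of the patch):

#include "venus_hfi_queue.h"

static int send_packet(struct msm_vidc_core *core, void *pkt)
{
        int rc;

        /* power handling stays in venus_hfi */
        rc = __resume(core);
        if (rc)
                return rc;

        /* ring-buffer write now lives in venus_hfi_queue.c */
        rc = venus_hfi_queue_cmd_write(core, pkt);
        if (!rc)
                __schedule_power_collapse_work(core);

        return rc;
}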

Kbuild
View File

@@ -71,6 +71,7 @@ msm_video-objs += driver/vidc/src/msm_vidc_v4l2.o \
driver/vidc/src/msm_vidc_memory.o \
driver/vidc/src/msm_vidc_fence.o \
driver/vidc/src/venus_hfi.o \
driver/vidc/src/venus_hfi_queue.o \
driver/vidc/src/hfi_packet.o \
driver/vidc/src/venus_hfi_response.o \
driver/platform/common/src/msm_vidc_platform.o

View File

@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include "msm_vidc_internal.h"
#include "venus_hfi_queue.h"
struct msm_vidc_core;

View File

@@ -679,74 +679,6 @@ enum signal_session_response {
MAX_SIGNAL,
};
#define HFI_MASK_QHDR_TX_TYPE 0xFF000000
#define HFI_MASK_QHDR_RX_TYPE 0x00FF0000
#define HFI_MASK_QHDR_PRI_TYPE 0x0000FF00
#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000FF
#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q 0x00
#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q 0x01
#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q 0x02
#define HFI_MASK_QHDR_STATUS 0x000000FF
#define VIDC_IFACEQ_NUMQ 3
#define VIDC_IFACEQ_CMDQ_IDX 0
#define VIDC_IFACEQ_MSGQ_IDX 1
#define VIDC_IFACEQ_DBGQ_IDX 2
#define VIDC_IFACEQ_MAX_BUF_COUNT 50
#define VIDC_IFACE_MAX_PARALLEL_CLNTS 16
#define VIDC_IFACEQ_DFLT_QHDR 0x01010000
struct hfi_queue_table_header {
u32 qtbl_version;
u32 qtbl_size;
u32 qtbl_qhdr0_offset;
u32 qtbl_qhdr_size;
u32 qtbl_num_q;
u32 qtbl_num_active_q;
void *device_addr;
char name[256];
};
struct hfi_queue_header {
u32 qhdr_status;
u32 qhdr_start_addr;
u32 qhdr_type;
u32 qhdr_q_size;
u32 qhdr_pkt_size;
u32 qhdr_pkt_drop_cnt;
u32 qhdr_rx_wm;
u32 qhdr_tx_wm;
u32 qhdr_rx_req;
u32 qhdr_tx_req;
u32 qhdr_rx_irq_status;
u32 qhdr_tx_irq_status;
u32 qhdr_read_idx;
u32 qhdr_write_idx;
};
#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) \
+ sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
#define VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \
VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
(void *)((ptr + sizeof(struct hfi_queue_table_header)) + \
(i * sizeof(struct hfi_queue_header)))
#define QDSS_SIZE 4096
#define SFR_SIZE 4096
#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ))
#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
ALIGNED_QDSS_SIZE, SZ_1M)
#define TOTAL_QSIZE (SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE)
struct profile_data {
u64 start;
u64 stop;

View File

@@ -72,8 +72,6 @@ int venus_hfi_set_ir_period(struct msm_vidc_inst *inst, u32 ir_type,
void venus_hfi_pm_work_handler(struct work_struct *work);
irqreturn_t venus_hfi_isr(int irq, void *data);
irqreturn_t venus_hfi_isr_handler(int irq, void *data);
int venus_hfi_interface_queues_init(struct msm_vidc_core *core);
void venus_hfi_interface_queues_deinit(struct msm_vidc_core *core);
int __write_register_masked(struct msm_vidc_core *core,
u32 reg, u32 value, u32 mask);
@@ -82,10 +80,6 @@ int __write_register(struct msm_vidc_core *core,
int __read_register(struct msm_vidc_core *core, u32 reg, u32 *value);
int __read_register_with_poll_timeout(struct msm_vidc_core *core,
u32 reg, u32 mask, u32 exp_val, u32 sleep_us, u32 timeout_us);
int __iface_cmdq_write(struct msm_vidc_core *core,
void *pkt);
int __iface_msgq_read(struct msm_vidc_core *core, void *pkt);
int __iface_dbgq_read(struct msm_vidc_core *core, void *pkt);
int __set_clocks(struct msm_vidc_core *core, u32 freq);
int __scale_clocks(struct msm_vidc_core *core);
int __set_clk_rate(struct msm_vidc_core *core,

View File

@@ -0,0 +1,92 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
*/
#ifndef _VENUS_HFI_QUEUE_H_
#define _VENUS_HFI_QUEUE_H_
#include <linux/types.h>
#include "msm_vidc_internal.h"
#define HFI_MASK_QHDR_TX_TYPE 0xff000000
#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00
#define HFI_MASK_QHDR_Q_ID_TYPE 0x000000ff
#define HFI_Q_ID_HOST_TO_CTRL_CMD_Q 0
#define HFI_Q_ID_CTRL_TO_HOST_MSG_Q 1
#define HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q 2
#define HFI_MASK_QHDR_STATUS 0x000000ff
#define VIDC_IFACEQ_NUMQ 3
#define VIDC_IFACEQ_CMDQ_IDX 0
#define VIDC_IFACEQ_MSGQ_IDX 1
#define VIDC_IFACEQ_DBGQ_IDX 2
#define VIDC_IFACEQ_MAX_BUF_COUNT 50
#define VIDC_IFACE_MAX_PARALLEL_CLNTS 16
#define VIDC_IFACEQ_DFLT_QHDR 0x01010000
struct hfi_queue_table_header {
u32 qtbl_version;
u32 qtbl_size;
u32 qtbl_qhdr0_offset;
u32 qtbl_qhdr_size;
u32 qtbl_num_q;
u32 qtbl_num_active_q;
void *device_addr;
char name[256];
};
struct hfi_queue_header {
u32 qhdr_status;
u32 qhdr_start_addr;
u32 qhdr_type;
u32 qhdr_q_size;
u32 qhdr_pkt_size;
u32 qhdr_pkt_drop_cnt;
u32 qhdr_rx_wm;
u32 qhdr_tx_wm;
u32 qhdr_rx_req;
u32 qhdr_tx_req;
u32 qhdr_rx_irq_status;
u32 qhdr_tx_irq_status;
u32 qhdr_read_idx;
u32 qhdr_write_idx;
};
#define VIDC_IFACEQ_TABLE_SIZE (sizeof(struct hfi_queue_table_header) + \
sizeof(struct hfi_queue_header) * VIDC_IFACEQ_NUMQ)
#define VIDC_IFACEQ_QUEUE_SIZE (VIDC_IFACEQ_MAX_PKT_SIZE * \
VIDC_IFACEQ_MAX_BUF_COUNT * VIDC_IFACE_MAX_PARALLEL_CLNTS)
#define VIDC_IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
(void *)((ptr + sizeof(struct hfi_queue_table_header)) + \
(i * sizeof(struct hfi_queue_header)))
#define QDSS_SIZE 4096
#define SFR_SIZE 4096
#define QUEUE_SIZE (VIDC_IFACEQ_TABLE_SIZE + \
(VIDC_IFACEQ_QUEUE_SIZE * VIDC_IFACEQ_NUMQ))
#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
ALIGNED_QDSS_SIZE, SZ_1M)
#define TOTAL_QSIZE (SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE)
struct msm_vidc_core;
int venus_hfi_queue_cmd_write(struct msm_vidc_core *core, void *pkt);
int venus_hfi_queue_cmd_write_intr(struct msm_vidc_core *core, void *pkt,
bool allow_intr);
int venus_hfi_queue_msg_read(struct msm_vidc_core *core, void *pkt);
int venus_hfi_queue_dbg_read(struct msm_vidc_core *core, void *pkt);
void venus_hfi_queue_deinit(struct msm_vidc_core *core);
int venus_hfi_queue_init(struct msm_vidc_core *core);
int venus_hfi_reset_queue_header(struct msm_vidc_core *core);
#endif
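
The queues declared above are single-producer ring buffers whose qhdr_read_idx/qhdr_write_idx count 32-bit words. As a hedged illustration of how the sizes above are consumed, the free-space check performed before a packet write reduces to the following (q_size_words stands for the queue payload size in words, i.e. mem_size >> 2; the helper name is illustrative only, not part of the patch):

/* Sketch of the free-space computation used before writing a packet. */
static u32 iface_q_empty_space(u32 read_idx, u32 write_idx, u32 q_size_words)
{
        /*
         * If the writer has not wrapped past the reader, the free space is
         * what remains of the ring; otherwise it is the gap up to the reader.
         * A packet is only written when it is strictly smaller than this,
         * so the queue is never filled completely and full/empty stay
         * distinguishable.
         */
        return (write_idx >= read_idx) ?
                (q_size_words - (write_idx - read_idx)) :
                (read_idx - write_idx);
}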

View File

@@ -379,7 +379,7 @@ static int msm_vidc_component_bind(struct device *dev)
return rc;
}
rc = venus_hfi_interface_queues_init(core); rc = venus_hfi_queue_init(core);
if (rc) {
d_vpr_e("%s: interface queues init failed\n", __func__);
goto queues_deinit;
@@ -396,7 +396,7 @@ static int msm_vidc_component_bind(struct device *dev)
return 0;
queues_deinit:
venus_hfi_interface_queues_deinit(core); venus_hfi_queue_deinit(core);
/**
* queues and core can be inited again during session_open.
* So don't declare as probe failure.
@@ -411,7 +411,7 @@ static void msm_vidc_component_unbind(struct device *dev)
d_vpr_h("%s(): %s\n", __func__, dev_name(dev));
msm_vidc_core_deinit(core, true);
venus_hfi_interface_queues_deinit(core); venus_hfi_queue_deinit(core);
component_unbind_all(dev, core);
d_vpr_h("%s(): succssful\n", __func__);
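
The bind/unbind hunks above also show the intended lifecycle: venus_hfi_queue_init() is safe to call when the queues already exist (it then only resets the queue headers), and venus_hfi_queue_deinit() is the single teardown point used both on a bind failure and at unbind. A condensed sketch of that pairing (setup_iface_queues is an illustrative name, not from the patch):

/* Sketch: allocate the interface queues, or reset headers if already mapped. */
static int setup_iface_queues(struct msm_vidc_core *core)
{
        int rc;

        rc = venus_hfi_queue_init(core);
        if (rc) {
                /* mirrors the bind error path: release whatever was set up */
                venus_hfi_queue_deinit(core);
                return rc;
        }
        return 0;
}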

View File

@@ -26,6 +26,7 @@
#include "msm_vidc_debug.h"
#include "hfi_packet.h"
#include "venus_hfi_response.h"
#include "venus_hfi_queue.h"
#include "msm_vidc_events.h"
#define MAX_FIRMWARE_NAME_SIZE 128
@@ -135,28 +136,6 @@ void __dump(struct dump dump[], int len)
}
}
static void __dump_packet(u8 *packet, const char *function, void *qinfo)
{
u32 c = 0, session_id, packet_size = *(u32 *)packet;
const int row_size = 32;
/*
* row must contain enough for 0xdeadbaad * 8 to be converted into
* "de ad ba ab " * 8 + '\0'
*/
char row[3 * 32];
session_id = *((u32 *)packet + 1);
d_vpr_t("%08x: %s: %pK\n", session_id, function, qinfo);
for (c = 0; c * row_size < packet_size; ++c) {
int bytes_to_read = ((c + 1) * row_size > packet_size) ?
packet_size % row_size : row_size;
hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
row_size, 4, row, sizeof(row), false);
d_vpr_t("%08x: %s\n", session_id, row);
}
}
static void __fatal_error(bool fatal)
{
WARN_ON(fatal);
@@ -733,369 +712,6 @@ int __scale_clocks(struct msm_vidc_core *core)
return 0;
}
static int __write_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
bool *rx_req_is_set)
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_write_idx;
u32 empty_space, read_idx, write_idx;
u32 *write_ptr;
if (!qinfo || !packet) {
d_vpr_e("%s: invalid params %pK %pK\n",
__func__, qinfo, packet);
return -EINVAL;
} else if (!qinfo->q_array.align_virtual_addr) {
d_vpr_e("Queues have already been freed\n");
return -EINVAL;
}
queue = (struct hfi_queue_header *) qinfo->q_hdr;
if (!queue) {
d_vpr_e("queue not present\n");
return -ENOENT;
}
if (msm_vidc_debug & VIDC_PKT)
__dump_packet(packet, __func__, qinfo);
// TODO: handle writing packet
//d_vpr_e("skip writing packet\n");
//return 0;
packet_size_in_words = (*(u32 *)packet) >> 2;
if (!packet_size_in_words || packet_size_in_words >
qinfo->q_array.mem_size>>2) {
d_vpr_e("Invalid packet size\n");
return -ENODATA;
}
read_idx = queue->qhdr_read_idx;
write_idx = queue->qhdr_write_idx;
empty_space = (write_idx >= read_idx) ?
((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
(read_idx - write_idx);
if (empty_space <= packet_size_in_words) {
queue->qhdr_tx_req = 1;
d_vpr_e("Insufficient size (%d) to write (%d)\n",
empty_space, packet_size_in_words);
return -ENOTEMPTY;
}
queue->qhdr_tx_req = 0;
new_write_idx = write_idx + packet_size_in_words;
write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
(write_idx << 2));
if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
qinfo->q_array.mem_size)) {
d_vpr_e("Invalid write index\n");
return -ENODATA;
}
if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(write_ptr, packet, packet_size_in_words << 2);
} else {
new_write_idx -= qinfo->q_array.mem_size >> 2;
memcpy(write_ptr, packet, (packet_size_in_words -
new_write_idx) << 2);
memcpy((void *)qinfo->q_array.align_virtual_addr,
packet + ((packet_size_in_words - new_write_idx) << 2),
new_write_idx << 2);
}
/*
* Memory barrier to make sure packet is written before updating the
* write index
*/
mb();
queue->qhdr_write_idx = new_write_idx;
if (rx_req_is_set)
*rx_req_is_set = true;
/*
* Memory barrier to make sure write index is updated before an
* interrupt is raised on venus.
*/
mb();
return 0;
}
static int __read_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
u32 *pb_tx_req_is_set)
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
u32 read_idx, write_idx;
int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) {
d_vpr_e("%s: invalid params %pK %pK %pK\n",
__func__, qinfo, packet, pb_tx_req_is_set);
return -EINVAL;
} else if (!qinfo->q_array.align_virtual_addr) {
d_vpr_e("Queues have already been freed\n");
return -EINVAL;
}
/*
* Memory barrier to make sure data is valid before
*reading it
*/
mb();
queue = (struct hfi_queue_header *) qinfo->q_hdr;
if (!queue) {
d_vpr_e("Queue memory is not allocated\n");
return -ENOMEM;
}
/*
* Do not set receive request for debug queue, if set,
* Venus generates interrupt for debug messages even
* when there is no response message available.
* In general debug queue will not become full as it
* is being emptied out for every interrupt from Venus.
* Venus will anyway generates interrupt if it is full.
*/
if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
receive_request = 1;
read_idx = queue->qhdr_read_idx;
write_idx = queue->qhdr_write_idx;
if (read_idx == write_idx) {
queue->qhdr_rx_req = receive_request;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
*/
mb();
*pb_tx_req_is_set = 0;
d_vpr_l(
"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
receive_request ? "message" : "debug",
queue->qhdr_rx_req, queue->qhdr_tx_req,
queue->qhdr_read_idx);
return -ENODATA;
}
read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
(read_idx << 2));
if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
qinfo->q_array.mem_size - sizeof(*read_ptr))) {
d_vpr_e("Invalid read index\n");
return -ENODATA;
}
packet_size_in_words = (*read_ptr) >> 2;
if (!packet_size_in_words) {
d_vpr_e("Zero packet size\n");
return -ENODATA;
}
new_read_idx = read_idx + packet_size_in_words;
if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
read_idx <= (qinfo->q_array.mem_size >> 2)) {
if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(packet, read_ptr,
packet_size_in_words << 2);
} else {
new_read_idx -= (qinfo->q_array.mem_size >> 2);
memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
memcpy(packet + ((packet_size_in_words -
new_read_idx) << 2),
(u8 *)qinfo->q_array.align_virtual_addr,
new_read_idx << 2);
}
} else {
d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n",
read_idx, packet_size_in_words << 2);
d_vpr_e("Dropping this packet\n");
new_read_idx = write_idx;
rc = -ENODATA;
}
queue->qhdr_rx_req = receive_request;
queue->qhdr_read_idx = new_read_idx;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
*/
mb();
*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
if ((msm_vidc_debug & VIDC_PKT) &&
!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
__dump_packet(packet, __func__, qinfo);
}
return rc;
}
/* Writes into cmdq without raising an interrupt */
static int __iface_cmdq_write_relaxed(struct msm_vidc_core *core,
void *pkt, bool *requires_interrupt)
{
struct msm_vidc_iface_q_info *q_info;
//struct vidc_hal_cmd_pkt_hdr *cmd_packet;
int rc = -E2BIG;
if (!core || !pkt) {
d_vpr_e("%s: invalid params %pK %pK\n",
__func__, core, pkt);
return -EINVAL;
}
rc = __strict_check(core, __func__);
if (rc)
return rc;
if (!__core_in_valid_state(core)) {
d_vpr_e("%s: fw not in init state\n", __func__);
rc = -EINVAL;
goto err_q_null;
}
//cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
//core->last_packet_type = cmd_packet->packet_type;
q_info = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
if (!q_info) {
d_vpr_e("cannot write to shared Q's\n");
goto err_q_null;
}
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot write to shared CMD Q's\n");
rc = -ENODATA;
goto err_q_null;
}
rc = __resume(core);
if (rc) {
d_vpr_e("%s: Power on failed\n", __func__);
goto err_q_write;
}
if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
__schedule_power_collapse_work(core);
rc = 0;
} else {
d_vpr_e("__iface_cmdq_write: queue full\n");
}
err_q_write:
err_q_null:
return rc;
}
int __iface_cmdq_write(struct msm_vidc_core *core,
void *pkt)
{
bool needs_interrupt = false;
int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);
if (!rc && needs_interrupt)
call_venus_op(core, raise_interrupt, core);
return rc;
}
static int __iface_cmdq_write_intr(struct msm_vidc_core *core,
void *pkt, bool allow)
{
bool needs_interrupt = false;
int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);
if (!rc && allow && needs_interrupt)
call_venus_op(core, raise_interrupt, core);
return rc;
}
int __iface_msgq_read(struct msm_vidc_core *core, void *pkt)
{
u32 tx_req_is_set = 0;
int rc = 0;
struct msm_vidc_iface_q_info *q_info;
if (!pkt) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
if (!__core_in_valid_state(core)) {
d_vpr_e("%s: fw not in init state\n", __func__);
rc = -EINVAL;
goto read_error_null;
}
q_info = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot read from shared MSG Q's\n");
rc = -ENODATA;
goto read_error_null;
}
if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
if (tx_req_is_set) {
//call_venus_op(core, raise_interrupt, core);
d_vpr_e("%s: queue is full\n", __func__);
rc = -EINVAL;
goto read_error_null;
}
rc = 0;
} else {
rc = -ENODATA;
}
read_error_null:
return rc;
}
int __iface_dbgq_read(struct msm_vidc_core *core, void *pkt)
{
u32 tx_req_is_set = 0;
int rc = 0;
struct msm_vidc_iface_q_info *q_info;
if (!pkt) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
q_info = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot read from shared DBG Q's\n");
rc = -ENODATA;
goto dbg_error_null;
}
if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
if (tx_req_is_set) {
d_vpr_e("%s: queue is full\n", __func__);
//call_venus_op(core, raise_interrupt, core);
rc = -EINVAL;
goto dbg_error_null;
}
rc = 0;
} else {
rc = -ENODATA;
}
dbg_error_null:
return rc;
}
static void __flush_debug_queue(struct msm_vidc_core *core,
u8 *packet, u32 packet_size)
{
@@ -1124,7 +740,7 @@ static void __flush_debug_queue(struct msm_vidc_core *core,
log_level |= FW_PRINTK;
}
while (!__iface_dbgq_read(core, packet)) { while (!venus_hfi_queue_dbg_read(core, packet)) {
pkt = (struct hfi_debug_header *) packet;
if (pkt->size < sizeof(struct hfi_debug_header)) {
@@ -1154,6 +770,36 @@ static void __flush_debug_queue(struct msm_vidc_core *core,
msm_vidc_vmem_free((void **)&packet);
}
static int __cmdq_write(struct msm_vidc_core *core, void *pkt)
{
int rc;
rc = __resume(core);
if (rc)
return rc;
rc = venus_hfi_queue_cmd_write(core, pkt);
if (!rc)
__schedule_power_collapse_work(core);
return rc;
}
static int __cmdq_write_intr(struct msm_vidc_core *core, void *pkt, bool allow_intr)
{
int rc;
rc = __resume(core);
if (rc)
return rc;
rc = venus_hfi_queue_cmd_write_intr(core, pkt, allow_intr);
if (!rc)
__schedule_power_collapse_work(core);
return rc;
}
static int __sys_set_debug(struct msm_vidc_core *core, u32 debug)
{
int rc = 0;
@@ -1163,7 +809,7 @@ static int __sys_set_debug(struct msm_vidc_core *core, u32 debug)
if (rc)
goto exit;
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
goto exit;
@@ -1185,7 +831,7 @@ static int __sys_set_coverage(struct msm_vidc_core *core,
return -ENOTEMPTY;
}
//if (__iface_cmdq_write(core, pkt, sid)) { //if (__cmdq_write(core, pkt, sid)) {
// d_vpr_e("Failed to send coverage pkt to f/w\n");
// return -ENOTEMPTY;
//}
@@ -1208,7 +854,7 @@ static int __sys_set_power_control(struct msm_vidc_core *core, bool enable)
if (rc)
return rc;
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
return rc;
@@ -1228,7 +874,7 @@ int __prepare_pc(struct msm_vidc_core *core)
goto err_pc_prep;
}
if (__iface_cmdq_write(core, core->packet)) if (__cmdq_write(core, core->packet))
rc = -ENOTEMPTY;
if (rc)
d_vpr_e("Failed to prepare venus for power off");
@@ -1336,7 +982,7 @@ static int __core_set_resource(struct msm_vidc_core *core,
goto err_create_pkt;
}
//rc = __iface_cmdq_write(core, core->packet); //rc = __cmdq_write(core, core->packet);
if (rc)
rc = -ENOTEMPTY;
@@ -1361,7 +1007,7 @@ static int __core_release_resource(struct msm_vidc_core *core,
goto err_create_pkt;
}
//rc = __iface_cmdq_write(core, core->packet); //rc = __cmdq_write(core, core->packet);
if (rc)
rc = -ENOTEMPTY;
@@ -1874,7 +1520,7 @@ static int __release_subcaches(struct msm_vidc_core *core)
}
/* Set resource to Venus for activated subcaches */
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
return rc;
@@ -1995,7 +1641,7 @@ static int __set_subcaches(struct msm_vidc_core *core)
}
/* Set resource to Venus for activated subcaches */
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
goto err_fail_set_subacaches;
@@ -2031,7 +1677,7 @@ static int __set_ubwc_config(struct msm_vidc_core *core)
if (rc)
return rc;
//rc = __iface_cmdq_write(core, core->packet)); //rc = __cmdq_write(core, core->packet));
if (rc)
return rc;
@@ -2191,201 +1837,6 @@ err_venus_power_on:
return rc;
}
static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
{
q_hdr->qhdr_status = 0x1;
q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
q_hdr->qhdr_pkt_size = 0;
q_hdr->qhdr_rx_wm = 0x1;
q_hdr->qhdr_tx_wm = 0x1;
q_hdr->qhdr_rx_req = 0x1;
q_hdr->qhdr_tx_req = 0x0;
q_hdr->qhdr_rx_irq_status = 0x0;
q_hdr->qhdr_tx_irq_status = 0x0;
q_hdr->qhdr_read_idx = 0x0;
q_hdr->qhdr_write_idx = 0x0;
}
void venus_hfi_interface_queues_deinit(struct msm_vidc_core *core)
{
int i;
d_vpr_h("%s()\n", __func__);
if (!core->iface_q_table.align_virtual_addr) {
d_vpr_h("%s: queues already deallocated\n", __func__);
return;
}
msm_vidc_memory_unmap(core, &core->iface_q_table.map);
msm_vidc_memory_free(core, &core->iface_q_table.alloc);
msm_vidc_memory_unmap(core, &core->sfr.map);
msm_vidc_memory_free(core, &core->sfr.alloc);
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
core->iface_queues[i].q_hdr = NULL;
core->iface_queues[i].q_array.align_virtual_addr = NULL;
core->iface_queues[i].q_array.align_device_addr = 0;
}
core->iface_q_table.align_virtual_addr = NULL;
core->iface_q_table.align_device_addr = 0;
core->sfr.align_virtual_addr = NULL;
core->sfr.align_device_addr = 0;
}
static int venus_hfi_reset_queue_header(struct msm_vidc_core *core)
{
struct msm_vidc_iface_q_info *iface_q;
struct hfi_queue_header *q_hdr;
int i, rc = 0;
if (!core) {
d_vpr_e("%s: invalid param\n", __func__);
return -EINVAL;
}
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
iface_q = &core->iface_queues[i];
__set_queue_hdr_defaults(iface_q->q_hdr);
}
iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
* Set receive request to zero on debug queue as there is no
* need of interrupt from video hardware for debug messages
*/
q_hdr->qhdr_rx_req = 0;
return rc;
}
int venus_hfi_interface_queues_init(struct msm_vidc_core *core)
{
int rc = 0;
struct hfi_queue_table_header *q_tbl_hdr;
struct msm_vidc_iface_q_info *iface_q;
struct msm_vidc_alloc alloc;
struct msm_vidc_map map;
int offset = 0;
u32 i;
d_vpr_h("%s()\n", __func__);
if (core->iface_q_table.align_virtual_addr) {
d_vpr_h("%s: queues already allocated\n", __func__);
venus_hfi_reset_queue_header(core);
return 0;
}
memset(&alloc, 0, sizeof(alloc));
alloc.type = MSM_VIDC_BUF_QUEUE;
alloc.region = MSM_VIDC_NON_SECURE;
alloc.size = TOTAL_QSIZE;
alloc.secure = false;
alloc.map_kernel = true;
rc = msm_vidc_memory_alloc(core, &alloc);
if (rc) {
d_vpr_e("%s: alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->iface_q_table.align_virtual_addr = alloc.kvaddr;
core->iface_q_table.alloc = alloc;
memset(&map, 0, sizeof(map));
map.type = alloc.type;
map.region = alloc.region;
map.dmabuf = alloc.dmabuf;
rc = msm_vidc_memory_map(core, &map);
if (rc) {
d_vpr_e("%s: alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->iface_q_table.align_device_addr = map.device_addr;
core->iface_q_table.map = map;
core->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
offset += core->iface_q_table.mem_size;
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
iface_q = &core->iface_queues[i];
iface_q->q_array.align_device_addr = map.device_addr + offset;
iface_q->q_array.align_virtual_addr = (void*)((char*)alloc.kvaddr + offset);
iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
offset += iface_q->q_array.mem_size;
iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
core->iface_q_table.align_virtual_addr, i);
}
q_tbl_hdr = (struct hfi_queue_table_header *)
core->iface_q_table.align_virtual_addr;
q_tbl_hdr->qtbl_version = 0;
q_tbl_hdr->device_addr = (void *)core;
strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
/* reset hfi queue header fields */
rc = venus_hfi_reset_queue_header(core);
if (rc) {
d_vpr_e("%s: init queue header failed\n", __func__);
goto fail_alloc_queue;
}
/* sfr buffer */
memset(&alloc, 0, sizeof(alloc));
alloc.type = MSM_VIDC_BUF_QUEUE;
alloc.region = MSM_VIDC_NON_SECURE;
alloc.size = ALIGNED_SFR_SIZE;
alloc.secure = false;
alloc.map_kernel = true;
rc = msm_vidc_memory_alloc(core, &alloc);
if (rc) {
d_vpr_e("%s: sfr alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->sfr.align_virtual_addr = alloc.kvaddr;
core->sfr.alloc = alloc;
memset(&map, 0, sizeof(map));
map.type = alloc.type;
map.region = alloc.region;
map.dmabuf = alloc.dmabuf;
rc = msm_vidc_memory_map(core, &map);
if (rc) {
d_vpr_e("%s: sfr map failed\n", __func__);
goto fail_alloc_queue;
}
core->sfr.align_device_addr = map.device_addr;
core->sfr.map = map;
core->sfr.mem_size = ALIGNED_SFR_SIZE;
/* write sfr buffer size in first word */
*((u32 *)core->sfr.align_virtual_addr) = core->sfr.mem_size;
return 0;
fail_alloc_queue:
return -ENOMEM;
}
static int __load_fw_to_memory(struct platform_device *pdev,
const char *fw_name)
{
@@ -2594,7 +2045,7 @@ static int __response_handler(struct msm_vidc_core *core)
}
memset(core->response_packet, 0, core->packet_size);
while (!__iface_msgq_read(core, core->response_packet)) { while (!venus_hfi_queue_msg_read(core, core->response_packet)) {
rc = handle_response(core, core->response_packet);
if (rc)
continue;
@@ -2712,7 +2163,7 @@ static int __sys_init(struct msm_vidc_core *core)
if (rc)
return rc;
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
return rc;
@@ -2727,7 +2178,7 @@ static int __sys_image_version(struct msm_vidc_core *core)
if (rc)
return rc;
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
return rc;
@@ -2748,7 +2199,7 @@ int venus_hfi_core_init(struct msm_vidc_core *core)
if (rc)
return rc;
rc = venus_hfi_interface_queues_init(core); rc = venus_hfi_queue_init(core);
if (rc)
goto error;
@@ -2909,7 +2360,7 @@ int venus_hfi_trigger_ssr(struct msm_vidc_core *core, u32 type,
if (rc)
goto exit;
rc = __iface_cmdq_write(core, core->packet); rc = __cmdq_write(core, core->packet);
if (rc)
goto exit;
@@ -2958,7 +2409,7 @@ int venus_hfi_trigger_stability(struct msm_vidc_inst *inst, u32 type,
if (rc)
goto unlock;
rc = __iface_cmdq_write(core, inst->packet); rc = __cmdq_write(core, inst->packet);
if (rc)
goto unlock;
@@ -3005,7 +2456,7 @@ int venus_hfi_reserve_hardware(struct msm_vidc_inst *inst, u32 duration)
if (rc)
goto unlock;
rc = __iface_cmdq_write(core, inst->packet); rc = __cmdq_write(core, inst->packet);
if (rc)
goto unlock;
@@ -3046,7 +2497,7 @@ int venus_hfi_session_open(struct msm_vidc_inst *inst)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3090,7 +2541,7 @@ int venus_hfi_session_set_codec(struct msm_vidc_inst *inst)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3134,7 +2585,7 @@ int venus_hfi_session_set_secure_mode(struct msm_vidc_inst *inst)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3220,7 +2671,7 @@ int venus_hfi_session_property(struct msm_vidc_inst *inst,
goto unlock;
}
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3259,7 +2710,7 @@ int venus_hfi_session_close(struct msm_vidc_inst *inst)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3302,7 +2753,7 @@ int venus_hfi_start(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3346,7 +2797,7 @@ int venus_hfi_stop(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3389,7 +2840,7 @@ int venus_hfi_session_pause(struct msm_vidc_inst *inst, enum msm_vidc_port_type
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3433,7 +2884,7 @@ int venus_hfi_session_resume(struct msm_vidc_inst *inst,
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3477,7 +2928,7 @@ int venus_hfi_session_drain(struct msm_vidc_inst *inst, enum msm_vidc_port_type
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3523,7 +2974,7 @@ int venus_hfi_session_command(struct msm_vidc_inst *inst,
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3637,7 +3088,8 @@ int venus_hfi_queue_super_buffer(struct msm_vidc_inst *inst,
}
/* Raise interrupt only for last pkt in the batch */
rc = __iface_cmdq_write_intr(inst->core, inst->packet, (cnt == batch_size - 1)); rc = __cmdq_write_intr(inst->core, inst->packet,
(cnt == batch_size - 1));
if (rc)
goto unlock;
@@ -3774,7 +3226,7 @@ int venus_hfi_queue_buffer(struct msm_vidc_inst *inst,
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3827,7 +3279,7 @@ int venus_hfi_release_buffer(struct msm_vidc_inst *inst,
if (rc)
goto unlock;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc)
goto unlock;
@@ -3941,7 +3393,7 @@ int venus_hfi_set_ir_period(struct msm_vidc_inst *inst, u32 ir_type,
if (rc)
goto exit;
rc = __iface_cmdq_write(inst->core, inst->packet); rc = __cmdq_write(inst->core, inst->packet);
if (rc) {
i_vpr_e(inst, "%s: failed to set cap[%d] %s to fw\n",
__func__, cap_id, cap_name(cap_id));

View File

@@ -0,0 +1,612 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
/* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. */
#include "venus_hfi_queue.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_core.h"
#include "msm_vidc_memory.h"
static int __strict_check(struct msm_vidc_core *core, const char *function)
{
bool fatal = !mutex_is_locked(&core->lock);
WARN_ON(fatal);
if (fatal)
d_vpr_e("%s: strict check failed\n", function);
return fatal ? -EINVAL : 0;
}
static bool __core_in_valid_state(struct msm_vidc_core *core)
{
return core->state != MSM_VIDC_CORE_DEINIT;
}
static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
{
q_hdr->qhdr_status = 0x1;
q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
q_hdr->qhdr_pkt_size = 0;
q_hdr->qhdr_rx_wm = 0x1;
q_hdr->qhdr_tx_wm = 0x1;
q_hdr->qhdr_rx_req = 0x1;
q_hdr->qhdr_tx_req = 0x0;
q_hdr->qhdr_rx_irq_status = 0x0;
q_hdr->qhdr_tx_irq_status = 0x0;
q_hdr->qhdr_read_idx = 0x0;
q_hdr->qhdr_write_idx = 0x0;
}
static void __dump_packet(u8 *packet, const char *function, void *qinfo)
{
u32 c = 0, session_id, packet_size = *(u32 *)packet;
const int row_size = 32;
/*
* row must contain enough for 0xdeadbaad * 8 to be converted into
* "de ad ba ab " * 8 + '\0'
*/
char row[3 * 32];
session_id = *((u32 *)packet + 1);
d_vpr_t("%08x: %s: %pK\n", session_id, function, qinfo);
for (c = 0; c * row_size < packet_size; ++c) {
int bytes_to_read = ((c + 1) * row_size > packet_size) ?
packet_size % row_size : row_size;
hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
row_size, 4, row, sizeof(row), false);
d_vpr_t("%08x: %s\n", session_id, row);
}
}
static int __write_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
bool *rx_req_is_set)
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_write_idx;
u32 empty_space, read_idx, write_idx;
u32 *write_ptr;
if (!qinfo || !packet) {
d_vpr_e("%s: invalid params %pK %pK\n",
__func__, qinfo, packet);
return -EINVAL;
} else if (!qinfo->q_array.align_virtual_addr) {
d_vpr_e("Queues have already been freed\n");
return -EINVAL;
}
queue = (struct hfi_queue_header *) qinfo->q_hdr;
if (!queue) {
d_vpr_e("queue not present\n");
return -ENOENT;
}
if (msm_vidc_debug & VIDC_PKT)
__dump_packet(packet, __func__, qinfo);
// TODO: handle writing packet
//d_vpr_e("skip writing packet\n");
//return 0;
packet_size_in_words = (*(u32 *)packet) >> 2;
if (!packet_size_in_words || packet_size_in_words >
qinfo->q_array.mem_size>>2) {
d_vpr_e("Invalid packet size\n");
return -ENODATA;
}
read_idx = queue->qhdr_read_idx;
write_idx = queue->qhdr_write_idx;
empty_space = (write_idx >= read_idx) ?
((qinfo->q_array.mem_size>>2) - (write_idx - read_idx)) :
(read_idx - write_idx);
if (empty_space <= packet_size_in_words) {
queue->qhdr_tx_req = 1;
d_vpr_e("Insufficient size (%d) to write (%d)\n",
empty_space, packet_size_in_words);
return -ENOTEMPTY;
}
queue->qhdr_tx_req = 0;
new_write_idx = write_idx + packet_size_in_words;
write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
(write_idx << 2));
if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
qinfo->q_array.mem_size)) {
d_vpr_e("Invalid write index\n");
return -ENODATA;
}
if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(write_ptr, packet, packet_size_in_words << 2);
} else {
new_write_idx -= qinfo->q_array.mem_size >> 2;
memcpy(write_ptr, packet, (packet_size_in_words -
new_write_idx) << 2);
memcpy((void *)qinfo->q_array.align_virtual_addr,
packet + ((packet_size_in_words - new_write_idx) << 2),
new_write_idx << 2);
}
/*
* Memory barrier to make sure packet is written before updating the
* write index
*/
mb();
queue->qhdr_write_idx = new_write_idx;
if (rx_req_is_set)
*rx_req_is_set = true;
/*
* Memory barrier to make sure write index is updated before an
* interrupt is raised on venus.
*/
mb();
return 0;
}
static int __read_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
u32 *pb_tx_req_is_set)
{
struct hfi_queue_header *queue;
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
u32 read_idx, write_idx;
int rc = 0;
if (!qinfo || !packet || !pb_tx_req_is_set) {
d_vpr_e("%s: invalid params %pK %pK %pK\n",
__func__, qinfo, packet, pb_tx_req_is_set);
return -EINVAL;
} else if (!qinfo->q_array.align_virtual_addr) {
d_vpr_e("Queues have already been freed\n");
return -EINVAL;
}
/*
* Memory barrier to make sure data is valid before
*reading it
*/
mb();
queue = (struct hfi_queue_header *) qinfo->q_hdr;
if (!queue) {
d_vpr_e("Queue memory is not allocated\n");
return -ENOMEM;
}
/*
* Do not set receive request for debug queue, if set,
* Venus generates interrupt for debug messages even
* when there is no response message available.
* In general debug queue will not become full as it
* is being emptied out for every interrupt from Venus.
* Venus will anyway generates interrupt if it is full.
*/
if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
receive_request = 1;
read_idx = queue->qhdr_read_idx;
write_idx = queue->qhdr_write_idx;
if (read_idx == write_idx) {
queue->qhdr_rx_req = receive_request;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
*/
mb();
*pb_tx_req_is_set = 0;
d_vpr_l(
"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
receive_request ? "message" : "debug",
queue->qhdr_rx_req, queue->qhdr_tx_req,
queue->qhdr_read_idx);
return -ENODATA;
}
read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
(read_idx << 2));
if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
qinfo->q_array.mem_size - sizeof(*read_ptr))) {
d_vpr_e("Invalid read index\n");
return -ENODATA;
}
packet_size_in_words = (*read_ptr) >> 2;
if (!packet_size_in_words) {
d_vpr_e("Zero packet size\n");
return -ENODATA;
}
new_read_idx = read_idx + packet_size_in_words;
if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
read_idx <= (qinfo->q_array.mem_size >> 2)) {
if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
memcpy(packet, read_ptr,
packet_size_in_words << 2);
} else {
new_read_idx -= (qinfo->q_array.mem_size >> 2);
memcpy(packet, read_ptr,
(packet_size_in_words - new_read_idx) << 2);
memcpy(packet + ((packet_size_in_words -
new_read_idx) << 2),
(u8 *)qinfo->q_array.align_virtual_addr,
new_read_idx << 2);
}
} else {
d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n",
read_idx, packet_size_in_words << 2);
d_vpr_e("Dropping this packet\n");
new_read_idx = write_idx;
rc = -ENODATA;
}
queue->qhdr_rx_req = receive_request;
queue->qhdr_read_idx = new_read_idx;
/*
* mb() to ensure qhdr is updated in main memory
* so that venus reads the updated header values
*/
mb();
*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;
if ((msm_vidc_debug & VIDC_PKT) &&
!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
__dump_packet(packet, __func__, qinfo);
}
return rc;
}
/* Writes into cmdq without raising an interrupt */
static int __iface_cmdq_write_relaxed(struct msm_vidc_core *core,
void *pkt, bool *requires_interrupt)
{
struct msm_vidc_iface_q_info *q_info;
//struct vidc_hal_cmd_pkt_hdr *cmd_packet;
int rc = -E2BIG;
if (!core || !pkt) {
d_vpr_e("%s: invalid params %pK %pK\n",
__func__, core, pkt);
return -EINVAL;
}
rc = __strict_check(core, __func__);
if (rc)
return rc;
if (!__core_in_valid_state(core)) {
d_vpr_e("%s: fw not in init state\n", __func__);
rc = -EINVAL;
goto err_q_null;
}
//cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
//core->last_packet_type = cmd_packet->packet_type;
q_info = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
if (!q_info) {
d_vpr_e("cannot write to shared Q's\n");
goto err_q_null;
}
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot write to shared CMD Q's\n");
rc = -ENODATA;
goto err_q_null;
}
if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
rc = 0;
} else {
d_vpr_e("queue full\n");
}
err_q_null:
return rc;
}
int venus_hfi_queue_cmd_write(struct msm_vidc_core *core, void *pkt)
{
bool needs_interrupt = false;
int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);
if (!rc && needs_interrupt)
call_venus_op(core, raise_interrupt, core);
return rc;
}
int venus_hfi_queue_cmd_write_intr(struct msm_vidc_core *core, void *pkt,
bool allow_intr)
{
bool needs_interrupt = false;
int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);
if (!rc && allow_intr && needs_interrupt)
call_venus_op(core, raise_interrupt, core);
return rc;
}
int venus_hfi_queue_msg_read(struct msm_vidc_core *core, void *pkt)
{
u32 tx_req_is_set = 0;
int rc = 0;
struct msm_vidc_iface_q_info *q_info;
if (!pkt) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
if (!__core_in_valid_state(core)) {
d_vpr_e("%s: fw not in init state\n", __func__);
rc = -EINVAL;
goto read_error_null;
}
q_info = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot read from shared MSG Q's\n");
rc = -ENODATA;
goto read_error_null;
}
if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
if (tx_req_is_set) {
//call_venus_op(core, raise_interrupt, core);
d_vpr_e("%s: queue is full\n", __func__);
rc = -EINVAL;
goto read_error_null;
}
rc = 0;
} else {
rc = -ENODATA;
}
read_error_null:
return rc;
}
int venus_hfi_queue_dbg_read(struct msm_vidc_core *core, void *pkt)
{
u32 tx_req_is_set = 0;
int rc = 0;
struct msm_vidc_iface_q_info *q_info;
if (!pkt) {
d_vpr_e("%s: invalid params\n", __func__);
return -EINVAL;
}
q_info = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
if (!q_info->q_array.align_virtual_addr) {
d_vpr_e("cannot read from shared DBG Q's\n");
rc = -ENODATA;
goto dbg_error_null;
}
if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
if (tx_req_is_set) {
d_vpr_e("%s: queue is full\n", __func__);
//call_venus_op(core, raise_interrupt, core);
rc = -EINVAL;
goto dbg_error_null;
}
rc = 0;
} else {
rc = -ENODATA;
}
dbg_error_null:
return rc;
}
void venus_hfi_queue_deinit(struct msm_vidc_core *core)
{
int i;
d_vpr_h("%s()\n", __func__);
if (!core->iface_q_table.align_virtual_addr) {
d_vpr_h("%s: queues already deallocated\n", __func__);
return;
}
msm_vidc_memory_unmap(core, &core->iface_q_table.map);
msm_vidc_memory_free(core, &core->iface_q_table.alloc);
msm_vidc_memory_unmap(core, &core->sfr.map);
msm_vidc_memory_free(core, &core->sfr.alloc);
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
core->iface_queues[i].q_hdr = NULL;
core->iface_queues[i].q_array.align_virtual_addr = NULL;
core->iface_queues[i].q_array.align_device_addr = 0;
}
core->iface_q_table.align_virtual_addr = NULL;
core->iface_q_table.align_device_addr = 0;
core->sfr.align_virtual_addr = NULL;
core->sfr.align_device_addr = 0;
}
int venus_hfi_reset_queue_header(struct msm_vidc_core *core)
{
struct msm_vidc_iface_q_info *iface_q;
struct hfi_queue_header *q_hdr;
int i, rc = 0;
if (!core) {
d_vpr_e("%s: invalid param\n", __func__);
return -EINVAL;
}
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
iface_q = &core->iface_queues[i];
__set_queue_hdr_defaults(iface_q->q_hdr);
}
iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
* Set receive request to zero on debug queue as there is no
* need of interrupt from video hardware for debug messages
*/
q_hdr->qhdr_rx_req = 0;
return rc;
}
int venus_hfi_queue_init(struct msm_vidc_core *core)
{
int rc = 0;
struct hfi_queue_table_header *q_tbl_hdr;
struct hfi_queue_header *q_hdr;
struct msm_vidc_iface_q_info *iface_q;
struct msm_vidc_alloc alloc;
struct msm_vidc_map map;
int offset = 0;
u32 i;
d_vpr_h("%s()\n", __func__);
if (core->iface_q_table.align_virtual_addr) {
d_vpr_h("%s: queues already allocated\n", __func__);
venus_hfi_reset_queue_header(core);
return 0;
}
memset(&alloc, 0, sizeof(alloc));
alloc.type = MSM_VIDC_BUF_QUEUE;
alloc.region = MSM_VIDC_NON_SECURE;
alloc.size = TOTAL_QSIZE;
alloc.secure = false;
alloc.map_kernel = true;
rc = msm_vidc_memory_alloc(core, &alloc);
if (rc) {
d_vpr_e("%s: alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->iface_q_table.align_virtual_addr = alloc.kvaddr;
core->iface_q_table.alloc = alloc;
memset(&map, 0, sizeof(map));
map.type = alloc.type;
map.region = alloc.region;
map.dmabuf = alloc.dmabuf;
rc = msm_vidc_memory_map(core, &map);
if (rc) {
d_vpr_e("%s: alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->iface_q_table.align_device_addr = map.device_addr;
core->iface_q_table.map = map;
core->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
offset += core->iface_q_table.mem_size;
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
iface_q = &core->iface_queues[i];
iface_q->q_array.align_device_addr = map.device_addr + offset;
iface_q->q_array.align_virtual_addr = (void*)((char*)alloc.kvaddr + offset);
iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
offset += iface_q->q_array.mem_size;
iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
core->iface_q_table.align_virtual_addr, i);
__set_queue_hdr_defaults(iface_q->q_hdr);
}
q_tbl_hdr = (struct hfi_queue_table_header *)
core->iface_q_table.align_virtual_addr;
q_tbl_hdr->qtbl_version = 0;
q_tbl_hdr->device_addr = (void *)core;
strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
q_hdr = iface_q->q_hdr;
q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
/*
* Set receive request to zero on debug queue as there is no
* need of interrupt from video hardware for debug messages
*/
q_hdr->qhdr_rx_req = 0;
/* sfr buffer */
memset(&alloc, 0, sizeof(alloc));
alloc.type = MSM_VIDC_BUF_QUEUE;
alloc.region = MSM_VIDC_NON_SECURE;
alloc.size = ALIGNED_SFR_SIZE;
alloc.secure = false;
alloc.map_kernel = true;
rc = msm_vidc_memory_alloc(core, &alloc);
if (rc) {
d_vpr_e("%s: sfr alloc failed\n", __func__);
goto fail_alloc_queue;
}
core->sfr.align_virtual_addr = alloc.kvaddr;
core->sfr.alloc = alloc;
memset(&map, 0, sizeof(map));
map.type = alloc.type;
map.region = alloc.region;
map.dmabuf = alloc.dmabuf;
rc = msm_vidc_memory_map(core, &map);
if (rc) {
d_vpr_e("%s: sfr map failed\n", __func__);
goto fail_alloc_queue;
}
core->sfr.align_device_addr = map.device_addr;
core->sfr.map = map;
core->sfr.mem_size = ALIGNED_SFR_SIZE;
/* write sfr buffer size in first word */
*((u32 *)core->sfr.align_virtual_addr) = core->sfr.mem_size;
return 0;
fail_alloc_queue:
return -ENOMEM;
}