mm-drivers: hw_fence: add device-tree configurable queue padding

Add device-tree configurable padding, in bytes, before and after the queue
header(s). This enables a 32-byte aligned queue write_idx, which some
clients require to satisfy hardware constraints.
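
As a minimal sketch of the resulting layout (the header and payload sizes
below are illustrative assumptions, not the driver's real structure sizes,
and the device-tree entry shown in the comment is hypothetical): the new
"-extra" property supplies <start_padding end_padding>, and start_padding
shifts the queue header(s) so that, e.g., the write_index field can land on
a 32-byte boundary.

/*
 * Client queue memory layout with the new paddings:
 *   [ table header ][ start_padding ][ queue header(s) ][ end_padding ][ payloads ]
 *
 * Hypothetical DT entry: qcom,hw-fence-client-type-dpu-extra = <24 0>;
 * (each padding must be a multiple of 4 bytes, their sum a multiple of 8)
 */
#include <stdio.h>

#define TABLE_HDR_SIZE   40u    /* assumed queue table header size */
#define QUEUE_HDR_SIZE   56u    /* assumed per-queue header size */
#define PAYLOAD_SIZE     64u    /* assumed HW_FENCE_CLIENT_QUEUE_PAYLOAD */
#define PAGE_ALIGN(x)    (((x) + 4095u) & ~4095u)

int main(void)
{
	unsigned int queues_num = 2, queue_entries = 128;
	unsigned int start_padding = 24, end_padding = 0;  /* from the "-extra" DT property */
	unsigned int headers_size = TABLE_HDR_SIZE + queues_num * QUEUE_HDR_SIZE +
			start_padding + end_padding;
	unsigned int queue_size = PAYLOAD_SIZE * queue_entries;
	/* first queue header now starts after the table header plus start_padding */
	unsigned int qhdr0_offset = TABLE_HDR_SIZE + start_padding;
	unsigned int mem_size = PAGE_ALIGN(headers_size + queue_size * queues_num);

	printf("qhdr0_offset=%u mem_size=%u\n", qhdr0_offset, mem_size);
	return 0;
}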

Change-Id: Icfd6bb385c825a8629974c72522efdc3cbfe3303
Signed-off-by: Grace An <quic_gracan@quicinc.com>
Grace An
2022-12-13 11:49:57 -08:00
parent 5827c82e14
commit 2348b03273
5 changed files with 178 additions and 97 deletions

View File

@@ -172,6 +172,7 @@ enum payload_type {
* number of sub-clients (e.g. ife clients)
* @mem_descriptor: hfi header memory descriptor
* @queues: queues descriptor
* @queues_num: number of client queues
* @ipc_signal_id: id of the signal to be triggered for this client
* @ipc_client_vid: virtual id of the ipc client for this hw fence driver client
* @ipc_client_pid: physical id of the ipc client for this hw fence driver client
@@ -187,6 +188,7 @@ struct msm_hw_fence_client {
enum hw_fence_client_id client_id_ext;
struct msm_hw_fence_mem_addr mem_descriptor;
struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
int queues_num;
int ipc_signal_id;
int ipc_client_vid;
int ipc_client_pid;
@@ -239,24 +241,48 @@ struct msm_hw_fence_dbg_data {
};
/**
* struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client.
* struct hw_fence_client_type_desc - Structure holding client type properties, including static
* properties and client queue properties read from device-tree.
*
* @queues_num: number of client queues
* @queue_entries: number of queue entries per client queue
* @mem_size: size of memory allocated for client queues
* @start_offset: start offset of client queue memory region, from beginning of carved-out memory
* allocation for hw fence driver
* @name: name of client type, used to parse properties from device-tree
* @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
* HW_FENCE_CLIENT_ID_CTL0 for DPU clients
* @max_clients_num: maximum number of clients of given client type
* @clients_num: number of clients of given client type
* @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
* two (for both Tx and Rx Queues)
* @queue_entries: number of entries per client queue of given client type
* @start_padding: size of padding between queue table header and first queue header in bytes
* @end_padding: size of padding between queue header(s) and first queue payload in bytes
* @mem_size: size of memory allocated for client queue(s) per client in bytes
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
*/
struct hw_fence_client_queue_size_desc {
struct hw_fence_client_type_desc {
char *name;
enum hw_fence_client_id init_id;
u32 max_clients_num;
u32 clients_num;
u32 queues_num;
u32 queue_entries;
u32 start_padding;
u32 end_padding;
u32 mem_size;
u32 start_offset;
bool skip_txq_wr_idx;
};
/**
* struct hw_fence_client_queue_desc - Structure holding client queue properties for a client.
*
* @type: pointer to client queue properties of client type
* @start_offset: start offset of client queue memory region, from beginning of carved-out memory
* allocation for hw fence driver
*/
struct hw_fence_client_queue_desc {
struct hw_fence_client_type_desc *type;
u32 start_offset;
};
/**
* struct hw_fence_driver_data - Structure holding internal hw-fence driver data
*
@@ -268,6 +294,7 @@ struct hw_fence_client_queue_size_desc {
* @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
* @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
* @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
* @hw_fence_client_types: descriptors of properties for each hw fence client type
* @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
* @clients_num: number of supported hw fence clients (configured based on device-tree)
* @hw_fences_tbl: pointer to the hw-fences table
@@ -320,7 +347,7 @@ struct hw_fence_driver_data {
u32 hw_fence_ctrl_queue_size;
u32 hw_fence_mem_ctrl_queues_size;
/* client queues */
struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size;
struct hw_fence_client_queue_desc *hw_fence_client_queue_size;
struct hw_fence_client_type_desc *hw_fence_client_types;
u32 rxq_clients_num;
u32 clients_num;

View File

@@ -38,33 +38,6 @@ enum hw_fence_mem_reserve {
HW_FENCE_MEM_RESERVE_CLIENT_QUEUE
};
/**
* struct hw_fence_client_type_desc - Structure holding client type properties, including static
* properties and client queue properties read from device-tree.
*
* @name: name of client type, used to parse properties from device-tree
* @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
* HW_FENCE_CLIENT_ID_CTL0 for DPU clients
* @max_clients_num: maximum number of clients of given client type
* @clients_num: number of clients of given client type
* @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
* two (for both Tx and Rx Queues)
* @queue_entries: number of entries per client queue of given client type
* @mem_size: size of memory allocated for client queue(s) per client
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
*/
struct hw_fence_client_type_desc {
char *name;
enum hw_fence_client_id init_id;
u32 max_clients_num;
u32 clients_num;
u32 queues_num;
u32 queue_entries;
u32 mem_size;
bool skip_txq_wr_idx;
};
/**
* global_atomic_store() - Inter-processor lock
* @drv_data: hw fence driver data
@@ -173,6 +146,16 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
enum hw_fence_client_id client_id);
/**
* hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id.
*
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* Returns: number of client queues
*/
int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index
* is not updated in hw fence driver. Instead,

View File

@@ -32,10 +32,12 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
{
struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
struct hw_fence_client_type_desc *desc;
void *ptr, *qptr;
phys_addr_t phys, qphys;
u32 size, start_queue_offset;
int headers_size, queue_size, payload_size;
int start_padding = 0, end_padding = 0;
int i, ret = 0;
HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
@@ -46,14 +48,19 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
break;
case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
if (client_id >= drv_data->clients_num) {
HWFNC_ERR("Invalid client_id: %d\n", client_id);
if (client_id >= drv_data->clients_num ||
!drv_data->hw_fence_client_queue_size[client_id].type) {
HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id,
drv_data->clients_num);
return -EINVAL;
}
headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num);
queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
drv_data->hw_fence_client_queue_size[client_id].queue_entries;
desc = drv_data->hw_fence_client_queue_size[client_id].type;
start_padding = desc->start_padding;
end_padding = desc->end_padding;
headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding +
end_padding;
queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
break;
default:
@@ -75,16 +82,15 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
mem_descriptor->size = size; /* bytes */
mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */
HWFNC_DBG_INIT("Initialize headers\n");
HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n",
headers_size, start_padding, end_padding);
/* Initialize headers info within hfi memory */
hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
hfi_table_header->version = 0;
hfi_table_header->size = size; /* bytes */
/* Offset, from the Base Address, where the first queue header starts */
hfi_table_header->qhdr0_offset =
sizeof(struct msm_hw_fence_hfi_queue_table_header);
hfi_table_header->qhdr_size =
sizeof(struct msm_hw_fence_hfi_queue_header);
hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding;
hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE;
hfi_table_header->num_q = queues_num; /* number of queues */
hfi_table_header->num_active_q = queues_num;
@@ -96,7 +102,7 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
*/
HWFNC_DBG_INIT("Initialize queues\n");
hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
((char *)ptr + hfi_table_header->qhdr0_offset);
for (i = 0; i < queues_num; i++) {
HWFNC_DBG_INIT("init queue[%d]\n", i);
@@ -251,10 +257,9 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
u32 *wr_ptr;
int ret = 0;
if (queue_type >=
drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) {
HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type,
hw_fence_client->client_id);
if (queue_type >= hw_fence_client->queues_num) {
HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type,
hw_fence_client->client_id, hw_fence_client->queues_num);
return -EINVAL;
}
@@ -539,10 +544,16 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
{
int ret;
if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) {
HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n",
hw_fence_client->client_id);
return -EINVAL;
}
/* Init client queues */
ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
&hw_fence_client->mem_descriptor, hw_fence_client->queues,
drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num,
drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num,
hw_fence_client->client_id);
if (ret) {
HWFNC_ERR("Failure to init the queue for client:%d\n",

View File

@@ -77,23 +77,23 @@
*/
struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false},
{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false},
{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false},
{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false},
{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false},
{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true},
};
static void _lock(uint64_t *wait)
@@ -549,23 +549,16 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
*size = drv_data->hw_fence_mem_fences_table_size;
break;
case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
if (client_id >= drv_data->clients_num) {
HWFNC_ERR("unexpected client_id:%d\n", client_id);
if (client_id >= drv_data->clients_num ||
!drv_data->hw_fence_client_queue_size[client_id].type) {
HWFNC_ERR("unexpected client_id:%d for clients_num:%lu\n", client_id,
drv_data->clients_num);
ret = -EINVAL;
goto exit;
}
start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
*size = drv_data->hw_fence_client_queue_size[client_id].mem_size;
/*
* If this error occurs when client should be valid, check that support for this
* client has been configured in device-tree properties.
*/
if (!*size) {
HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id);
ret = -EINVAL;
}
*size = drv_data->hw_fence_client_queue_size[client_id].type->mem_size;
break;
default:
HWFNC_ERR("Invalid mem reserve type:%d\n", type);
@@ -592,6 +585,49 @@ exit:
return ret;
}
static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_type_desc *desc)
{
char name[40];
u32 tmp[2];
int ret;
snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);
ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2);
/* extra dt props not set */
if (ret)
return 0;
desc->start_padding = tmp[0];
desc->end_padding = tmp[1];
if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
(desc->start_padding + desc->end_padding) % sizeof(u64)) {
HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n",
desc->name, desc->start_padding, desc->end_padding);
return -EINVAL;
}
if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n",
desc->name, desc->queues_num, desc->start_padding);
return -EINVAL;
}
if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
desc->start_padding) {
HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n",
desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
return -EINVAL;
}
HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name,
desc->start_padding, desc->end_padding);
return 0;
}
static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_type_desc *desc)
{
@@ -600,7 +636,7 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
u32 queue_size;
int ret;
/* parse client queue property from device-tree */
/* parse client queue properties from device-tree */
snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
if (ret) {
@@ -626,6 +662,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
return -EINVAL;
}
/* parse extra client queue properties from device-tree */
ret = _parse_client_queue_dt_props_extra(drv_data, desc);
if (ret) {
HWFNC_ERR("%s failed to parse extra dt props\n", desc->name);
return -EINVAL;
}
/* compute mem_size */
if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n",
@@ -635,17 +678,18 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
if (queue_size >= ((U32_MAX & PAGE_MASK) -
HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) {
HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n",
desc->name, queue_size);
(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
desc->start_padding + desc->end_padding)) / desc->queues_num) {
HWFNC_ERR("%s client queue_sz:%lu start_p:%lu end_p:%lu will overflow mem size\n",
desc->name, queue_size, desc->start_padding, desc->end_padding);
return -EINVAL;
}
desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
(queue_size * desc->queues_num));
(queue_size * desc->queues_num) + desc->start_padding + desc->end_padding);
if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n",
HWFNC_ERR("%s client queue mem_size:%lu greater than max mem size:%lu\n",
desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
return -EINVAL;
}
@@ -690,7 +734,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;
/* allocate memory for client queue size descriptors */
size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc);
size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_desc);
drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
if (!drv_data->hw_fence_client_queue_size)
return -ENOMEM;
@@ -707,9 +751,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);
drv_data->hw_fence_client_queue_size[client_id] =
(struct hw_fence_client_queue_size_desc)
{desc->queues_num, desc->queue_entries, desc->mem_size,
start_offset, desc->skip_txq_wr_idx};
(struct hw_fence_client_queue_desc){desc, start_offset};
HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
desc->name, client_id_ext, client_id, start_offset);
start_offset += desc->mem_size;
@@ -929,10 +971,24 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver
return client_id_priv;
}
int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num ||
!drv_data->hw_fence_client_queue_size[client_id].type) {
HWFNC_ERR("invalid access to client:%d queues_num\n", client_id);
return 0;
}
return drv_data->hw_fence_client_queue_size[client_id].type->queues_num;
}
bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
if (!drv_data || client_id >= drv_data->clients_num ||
!drv_data->hw_fence_client_queue_size[client_id].type) {
HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id);
return false;
}
return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx;
return drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx;
}

View File

@@ -87,15 +87,18 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
}
hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
if (hw_fence_client->update_rxq &&
hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num <
HW_FENCE_CLIENT_QUEUES) {
HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id);
hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id);
if (!hw_fence_client->queues_num || (hw_fence_client->update_rxq &&
hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES)) {
HWFNC_ERR("client:%d invalid q_num:%lu for updates_rxq:%s\n", client_id,
hw_fence_client->queues_num,
hw_fence_client->update_rxq ? "true" : "false");
ret = -EINVAL;
goto error;
}
hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data,
client_id);
@@ -118,9 +121,10 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
if (ret)
goto error;
HWFNC_DBG_INIT("-- Initialized ptr:0x%p client_id:%d ipc_signal_id:%d ipc vid:%d pid:%d\n",
hw_fence_client, hw_fence_client->client_id, hw_fence_client->ipc_signal_id,
hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid);
HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n",
hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num,
hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid,
hw_fence_client->ipc_client_pid);
#if IS_ENABLED(CONFIG_DEBUG_FS)
init_waitqueue_head(&hw_fence_client->wait_queue);