diff --git a/hw_fence/include/hw_fence_drv_priv.h b/hw_fence/include/hw_fence_drv_priv.h
index 2e03faba41..f1786831fb 100644
--- a/hw_fence/include/hw_fence_drv_priv.h
+++ b/hw_fence/include/hw_fence_drv_priv.h
@@ -150,12 +150,19 @@ enum hw_fence_client_data_id {
  * @q_size_bytes: size of the queue
  * @va_header: pointer to the hfi header virtual address
  * @pa_queue: physical address of the queue
+ * @rd_wr_idx_start: start read and write indexes for client queue (zero by default)
+ * @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default)
+ * @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and
+ *               hfi_header->tx_wm is updated instead
  */
 struct msm_hw_fence_queue {
 	void *va_queue;
 	u32 q_size_bytes;
 	void *va_header;
 	phys_addr_t pa_queue;
+	u32 rd_wr_idx_start;
+	u32 rd_wr_idx_factor;
+	bool skip_wr_idx;
 };
 
 /**
@@ -178,8 +185,6 @@ enum payload_type {
  * @ipc_client_pid: physical id of the ipc client for this hw fence driver client
  * @update_rxq: bool to indicate if client uses rx-queue
  * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
- * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
- *                   driver and hfi_header->tx_wm is updated instead
  * @wait_queue: wait queue for the validation clients
  * @val_signal: doorbell flag to signal the validation clients in the wait queue
  */
@@ -194,7 +199,6 @@ struct msm_hw_fence_client {
 	int ipc_client_pid;
 	bool update_rxq;
 	bool send_ipc;
-	bool skip_txq_wr_idx;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	wait_queue_head_t wait_queue;
 	atomic_t val_signal;
@@ -255,6 +259,8 @@ struct msm_hw_fence_dbg_data {
  * @start_padding: size of padding between queue table header and first queue header in bytes
  * @end_padding: size of padding between queue header(s) and first queue payload in bytes
  * @mem_size: size of memory allocated for client queue(s) per client in bytes
+ * @txq_idx_start: start read and write indexes for client tx queue (zero by default)
+ * @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default)
  * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
  *                   driver and hfi_header->tx_wm is updated instead
  */
@@ -268,6 +274,8 @@ struct hw_fence_client_type_desc {
 	u32 start_padding;
 	u32 end_padding;
 	u32 mem_size;
+	u32 txq_idx_start;
+	u32 txq_idx_factor;
 	bool skip_txq_wr_idx;
 };
 
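The three new msm_hw_fence_queue fields define an affine mapping between the custom index a client publishes and the zero-based dword index the driver actually consumes. A minimal sketch of that mapping (illustrative helpers, not part of the patch; an exact round-trip holds only for indexes at payload boundaries, which is how the driver advances them):

static inline u32 idx_custom_to_u32(u32 idx, u32 start, u32 factor)
{
	/* custom index -> zero-based dword index */
	return (idx - start) * factor;
}

static inline u32 idx_u32_to_custom(u32 idx, u32 start, u32 factor)
{
	/* zero-based dword index -> custom index */
	return idx / factor + start;
}
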
diff --git a/hw_fence/include/hw_fence_drv_utils.h b/hw_fence/include/hw_fence_drv_utils.h
index 454b5b570d..6b35962f41 100644
--- a/hw_fence/include/hw_fence_drv_utils.h
+++ b/hw_fence/include/hw_fence_drv_utils.h
@@ -156,17 +156,4 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver
  */
 int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
 
-/**
- * hw_fence_utils_skips_txq_wr_index() - Returns bool to indicate if client Tx Queue write_index
- *                                       is not updated in hw fence driver. Instead,
- *                                       hfi_header->tx_wm tracks where payload is written within
- *                                       the queue.
- *
- * @drv_data: driver data
- * @client_id: hw fence driver client id
- *
- * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise
- */
-bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id);
-
 #endif /* __HW_FENCE_DRV_UTILS_H */
diff --git a/hw_fence/src/hw_fence_drv_priv.c b/hw_fence/src/hw_fence_drv_priv.c
index ea931f1510..f47abca728 100644
--- a/hw_fence/src/hw_fence_drv_priv.c
+++ b/hw_fence/src/hw_fence_drv_priv.c
@@ -15,6 +15,8 @@
 /* Global atomic lock */
 #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
 
+#define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1)
+
 inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
 {
 #ifdef HWFENCE_USE_SLEEP_TIMER
@@ -35,10 +37,11 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 	struct hw_fence_client_type_desc *desc;
 	void *ptr, *qptr;
 	phys_addr_t phys, qphys;
-	u32 size, start_queue_offset;
+	u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1;
 	int headers_size, queue_size, payload_size;
 	int start_padding = 0, end_padding = 0;
 	int i, ret = 0;
+	bool skip_txq_wr_idx = false;
 
 	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
 	switch (mem_reserve_id) {
@@ -62,6 +65,9 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 			end_padding;
 		queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
 		payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
+		txq_idx_start = desc->txq_idx_start;
+		txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1;
+		skip_txq_wr_idx = desc->skip_txq_wr_idx;
 		break;
 	default:
 		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
@@ -115,7 +121,8 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
 		hfi_queue_header->start_addr = qphys;
 
 		/* Set the queue type (i.e. RX or TX queue) */
-		hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;
+		hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE :
+			HW_FENCE_RX_QUEUE;
 
 		/* Set the size of this header */
 		hfi_queue_header->queue_size = queue_size;
@@ -123,6 +130,20 @@
 		/* Set the payload size */
 		hfi_queue_header->pkt_size = payload_size;
 
+		/* Set write index for clients' tx queues that index from nonzero value */
+		if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) {
+			if (skip_txq_wr_idx)
+				hfi_queue_header->tx_wm = txq_idx_start;
+			hfi_queue_header->read_index = txq_idx_start;
+			hfi_queue_header->write_index = txq_idx_start;
+			HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id,
+				skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx",
+				txq_idx_start);
+		}
+
+		/* Update memory for hfi_queue_header */
+		wmb();
+
 		/* Store Memory info in the Client data */
 		queues[i].va_queue = qptr;
 		queues[i].pa_queue = qphys;
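The seeding above runs at most once per queue: a nonzero write_index means the header was already initialized (for example by firmware or an earlier boot stage), so it is left untouched. Restated as a standalone sketch (function and struct type names assumed for illustration; the caller additionally gates this on IS_HW_FENCE_TX_QUEUE(i)):

static void seed_txq_indexes(struct msm_hw_fence_hfi_queue_header *hdr,
	u32 txq_idx_start, bool skip_txq_wr_idx)
{
	/* nonzero write_index: header already initialized, keep it */
	if (!txq_idx_start || hdr->write_index)
		return;

	if (skip_txq_wr_idx)
		hdr->tx_wm = txq_idx_start;
	hdr->read_index = txq_idx_start;
	hdr->write_index = txq_idx_start;
}
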
"wr_idx=tx_wm" : "wr_idx", + txq_idx_start); + } + + /* Update memory for hfi_queue_header */ + wmb(); + /* Store Memory info in the Client data */ queues[i].va_queue = qptr; queues[i].pa_queue = qphys; @@ -133,6 +154,18 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data, client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header, queues[i].q_size_bytes, payload_size); + /* Store additional tx queue rd_wr_idx properties */ + if (IS_HW_FENCE_TX_QUEUE(i)) { + queues[i].rd_wr_idx_start = txq_idx_start; + queues[i].rd_wr_idx_factor = txq_idx_factor; + queues[i].skip_wr_idx = skip_txq_wr_idx; + } else { + queues[i].rd_wr_idx_factor = 1; + } + HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n", + queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor, + queues[i].skip_wr_idx ? "true" : "false"); + /* Next header */ hfi_queue_header++; } @@ -189,6 +222,14 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, read_idx = readl_relaxed(&hfi_header->read_index); write_idx = readl_relaxed(&hfi_header->write_index); + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n", hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue); @@ -215,6 +256,13 @@ int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client, if (to_read_idx >= q_size_u32) to_read_idx = 0; + /* translate to_read_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Read the Client Queue */ payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id); payload->seqno = readq_relaxed(&read_ptr_payload->seqno); @@ -275,8 +323,8 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, return -EINVAL; } - /* if skipping update txq wr_index, then use hfi_header->tx_wm instead */ - if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx) + /* if skipping update wr_index, then use hfi_header->tx_wm instead */ + if (queue->skip_wr_idx) wr_ptr = &hfi_header->tx_wm; else wr_ptr = &hfi_header->write_index; @@ -310,8 +358,15 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n", hw_fence_client->client_id, &hfi_header->read_index, wr_ptr, - read_idx, write_idx, queue, queue_type, - hw_fence_client->skip_txq_wr_idx ? "true" : "false"); + read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? 
"true" : "false"); + + /* translate read and write indexes from custom indexing to dwords with no offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor; + HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n", + read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } /* Check queue to make sure message will fit */ q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) : @@ -346,6 +401,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data, if (to_write_idx >= q_size_u32) to_write_idx = 0; + /* translate to_write_idx to custom indexing with offset */ + if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) { + to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start; + HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n", + to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor); + } + /* Update Client Queue */ writeq_relaxed(payload_size, &write_ptr_payload->size); writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type); @@ -1462,8 +1524,12 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* For the client TxQ: set the read-index same as last write that was done by the client */ mb(); /* make sure data is ready before read */ wr_idx = readl_relaxed(&hfi_header->write_index); + if (queue->skip_wr_idx) + hfi_header->tx_wm = wr_idx; writel_relaxed(wr_idx, &hfi_header->read_index); wmb(); /* make sure data is updated after write the index*/ + HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n", + queue->skip_wr_idx ? 
"read_index=tx_wm" : "read_index", wr_idx); /* For the client RxQ: set the write-index same as last read done by the client */ if (hw_fence_client->update_rxq) { @@ -1489,6 +1555,7 @@ void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data, /* unlock */ GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); + HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx); } } diff --git a/hw_fence/src/hw_fence_drv_utils.c b/hw_fence/src/hw_fence_drv_utils.c index e2dc4b04ca..a42329ecb7 100644 --- a/hw_fence/src/hw_fence_drv_utils.c +++ b/hw_fence/src/hw_fence_drv_utils.c @@ -77,23 +77,31 @@ */ struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = { {"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, {"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, - HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, false}, - {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, - {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, true}, + HW_FENCE_CLIENT_QUEUES, 0, 0, 0, 0, 0, 0, false}, + {"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0, HW_FENCE_CLIENT_QUEUES, + 0, 0, 0, 0, 0, 0, false}, + {"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, + {"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, 0, 0, 0, 0, + true}, }; static void _lock(uint64_t *wait) @@ -588,44 +596,87 @@ exit: static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data, struct hw_fence_client_type_desc *desc) { + u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD 
@@ -588,44 +596,87 @@ exit:
 static int _parse_client_queue_dt_props_extra(struct hw_fence_driver_data *drv_data,
 	struct hw_fence_client_type_desc *desc)
 {
+	u32 max_idx_from_zero, payload_size_u32 = HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32);
 	char name[40];
-	u32 tmp[2];
-	int ret;
+	u32 tmp[4];
+	bool idx_by_payload = false;
+	int count, ret;
 
 	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s-extra", desc->name);
 
-	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 2);
-	/* extra dt props not set */
-	if (ret)
+	/* check if property is present */
+	ret = of_property_read_bool(drv_data->dev->of_node, name);
+	if (!ret)
 		return 0;
 
+	count = of_property_count_u32_elems(drv_data->dev->of_node, name);
+	if (count <= 0 || count > 4) {
+		HWFNC_ERR("invalid %s extra dt props count:%d\n", desc->name, count);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, count);
+	if (ret) {
+		HWFNC_ERR("Failed to read %s extra dt properties ret=%d count=%d\n", desc->name,
+			ret, count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
 	desc->start_padding = tmp[0];
-	desc->end_padding = tmp[1];
+	if (count >= 2)
+		desc->end_padding = tmp[1];
+	if (count >= 3)
+		desc->txq_idx_start = tmp[2];
+	if (count >= 4) {
+		if (tmp[3] > 1) {
+			HWFNC_ERR("%s invalid txq_idx_by_payload prop:%lu\n", desc->name, tmp[3]);
+			ret = -EINVAL;
+			goto exit;
+		}
+		idx_by_payload = tmp[3];
+		desc->txq_idx_factor = idx_by_payload ? payload_size_u32 : 1;
+	}
 
 	if (desc->start_padding % sizeof(u32) || desc->end_padding % sizeof(u32) ||
 			(desc->start_padding + desc->end_padding) % sizeof(u64)) {
 		HWFNC_ERR("%s start_padding:%lu end_padding:%lu violates mem alignment\n",
 			desc->name, desc->start_padding, desc->end_padding);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	if (desc->start_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) {
 		HWFNC_ERR("%s client queues_num:%lu start_padding:%lu will overflow mem_size\n",
 			desc->name, desc->queues_num, desc->start_padding);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
 	if (desc->end_padding >= U32_MAX - HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) -
 			desc->start_padding) {
 		HWFNC_ERR("%s client q_num:%lu start_p:%lu end_p:%lu will overflow mem_size\n",
 			desc->name, desc->queues_num, desc->start_padding, desc->end_padding);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}
 
-	HWFNC_DBG_INIT("%s: start_padding_size=%lu end_padding_size=%lu\n", desc->name,
-		desc->start_padding, desc->end_padding);
+	max_idx_from_zero = idx_by_payload ? desc->queue_entries :
+		desc->queue_entries * payload_size_u32;
+	if (desc->txq_idx_start >= U32_MAX - max_idx_from_zero) {
+		HWFNC_ERR("%s txq_idx start:%lu by_payload:%s q_entries:%d will overflow txq_idx\n",
+			desc->name, desc->txq_idx_start, idx_by_payload ? "true" : "false",
+			desc->queue_entries);
+		ret = -EINVAL;
+		goto exit;
+	}
 
-	return 0;
+	HWFNC_DBG_INIT("%s: start_p=%lu end_p=%lu txq_idx_start:%lu txq_idx_by_payload:%s\n",
+		desc->name, desc->start_padding, desc->end_padding, desc->txq_idx_start,
+		idx_by_payload ? "true" : "false");
+
+exit:
+	return ret;
 }
 
 static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
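With the extended parser, the existing two-cell property keeps its meaning and the two new cells are optional. A hypothetical binding example, with invented values, annotated per the tmp[] handling above:

/*
 *   qcom,hw-fence-client-type-ife0-extra = <0 0 0x100 1>;
 *
 * tmp[0], tmp[1]: start/end padding in bytes (none here);
 * tmp[2]: txq_idx_start = 0x100;
 * tmp[3]: txq_idx_by_payload = 1, so txq_idx_factor becomes
 *         HW_FENCE_CLIENT_QUEUE_PAYLOAD / sizeof(u32).
 * A shorter property leaves the trailing fields at their defaults
 * (txq_idx_start = 0, factor normalized to 1 at queue init).
 */
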
"true" : "false"); + +exit: + return ret; } static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data, @@ -981,14 +1032,3 @@ int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int cli return drv_data->hw_fence_client_queue_size[client_id].type->queues_num; } - -bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id) -{ - if (!drv_data || client_id >= drv_data->clients_num || - !drv_data->hw_fence_client_queue_size[client_id].type) { - HWFNC_ERR("invalid access to client:%d skips_txq_wr_idx\n", client_id); - return false; - } - - return drv_data->hw_fence_client_queue_size[client_id].type->skip_txq_wr_idx; -} diff --git a/hw_fence/src/msm_hw_fence.c b/hw_fence/src/msm_hw_fence.c index d243b06543..82ee33bdaa 100644 --- a/hw_fence/src/msm_hw_fence.c +++ b/hw_fence/src/msm_hw_fence.c @@ -99,16 +99,13 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext, goto error; } - hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data, - client_id); - /* Alloc Client HFI Headers and Queues */ ret = hw_fence_alloc_client_resources(hw_fence_drv_data, hw_fence_client, mem_descriptor); if (ret) goto error; - /* Initialize signal for communication withe FenceCTL */ + /* Initialize signal for communication with FenceCTL */ ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client); if (ret) goto error;