
mm-drivers: hw_fence: update txq to use separate software wr ptr

Some hw fence driver clients require the ability to call the
'msm_hw_fence_update_txq' API to update the queue payload without
updating the 'write_index' member within the hfi header. These clients
also need to receive the index at which the payload is written within
the queue.

This change supports this requirement by adding a device-tree property
to configure this behavior for each client. For clients that skip the
update to 'write_index', the 'tx_wm' member within the hfi header is
used to track in software where payloads are written within the queue.

Change-Id: I2881fa49bef4e49691eb6049830f9dc8dc8fa425
Signed-off-by: Grace An <[email protected]>
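
For reference, a minimal device-tree sketch of the per-client-type property
parsed by this change. The four u32 cells read by
_parse_client_queue_dt_props_indv() are clients_num, queues_num,
queue_entries, and skip_txq_wr_idx; the specific values below are purely
illustrative, not a recommended configuration:

	/* 1 client, 1 (tx-only) queue, 128 queue entries,   */
	/* skip_txq_wr_idx enabled (last cell must be 0 or 1) */
	qcom,hw-fence-client-type-ife0 = <1 1 128 1>;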

+ 6 - 0
hw_fence/include/hw_fence_drv_priv.h

@@ -175,6 +175,8 @@ enum payload_type {
  * @ipc_client_pid: physical id of the ipc client for this hw fence driver client
  * @update_rxq: bool to indicate if client uses rx-queue
  * @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
+ * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
+ *                   driver and hfi_header->tx_wm is updated instead
  * @wait_queue: wait queue for the validation clients
  * @val_signal: doorbell flag to signal the validation clients in the wait queue
  */
@@ -188,6 +190,7 @@ struct msm_hw_fence_client {
 	int ipc_client_pid;
 	bool update_rxq;
 	bool send_ipc;
+	bool skip_txq_wr_idx;
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	wait_queue_head_t wait_queue;
 	atomic_t val_signal;
@@ -241,12 +244,15 @@ struct msm_hw_fence_dbg_data {
  * @mem_size: size of memory allocated for client queues
  * @start_offset: start offset of client queue memory region, from beginning of carved-out memory
  *                allocation for hw fence driver
+ * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
+ *                   driver and hfi_header->tx_wm is updated instead
  */
 struct hw_fence_client_queue_size_desc {
 	u32 queues_num;
 	u32 queue_entries;
 	u32 mem_size;
 	u32 start_offset;
+	bool skip_txq_wr_idx;
 };
 
 /**

+ 16 - 0
hw_fence/include/hw_fence_drv_utils.h

@@ -51,6 +51,8 @@ enum hw_fence_mem_reserve {
  *              two (for both Tx and Rx Queues)
  * @queue_entries: number of entries per client queue of given client type
  * @mem_size: size of memory allocated for client queue(s) per client
+ * @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
+ *                   driver and hfi_header->tx_wm is updated instead
  */
 struct hw_fence_client_type_desc {
 	char *name;
@@ -60,6 +62,7 @@ struct hw_fence_client_type_desc {
 	u32 queues_num;
 	u32 queue_entries;
 	u32 mem_size;
+	bool skip_txq_wr_idx;
 };
 
 /**
@@ -170,4 +173,17 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
 enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
 	enum hw_fence_client_id client_id);
 
+/**
+ * hw_fence_utils_skips_txq_wr_idx() - Returns bool to indicate if client Tx Queue write_index
+ *                                       is not updated in hw fence driver. Instead,
+ *                                       hfi_header->tx_wm tracks where payload is written within
+ *                                       the queue.
+ *
+ * @drv_data: driver data
+ * @client_id: hw fence driver client id
+ *
+ * Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise
+ */
+bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id);
+
 #endif /* __HW_FENCE_DRV_UTILS_H */

+ 13 - 5
hw_fence/src/hw_fence_drv_priv.c

@@ -248,6 +248,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	bool lock_client = false;
 	u32 lock_idx;
 	u64 timestamp;
+	u32 *wr_ptr;
 	int ret = 0;
 
 	if (queue_type >=
@@ -269,6 +270,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 		return -EINVAL;
 	}
 
+	/* if skipping update txq wr_index, then use hfi_header->tx_wm instead */
+	if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx)
+		wr_ptr = &hfi_header->tx_wm;
+	else
+		wr_ptr = &hfi_header->write_index;
+
 	/*
 	 * We need to lock the client if there is an Rx Queue update, since that
 	 * is the only time when HW Fence driver can have a race condition updating
@@ -294,11 +301,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 
 	/* Get read and write index */
 	read_idx = readl_relaxed(&hfi_header->read_index);
-	write_idx = readl_relaxed(&hfi_header->write_index);
+	write_idx = readl_relaxed(wr_ptr);
 
-	HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n",
-		hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
-		read_idx, write_idx, queue, queue_type);
+	HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
+		hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
+		read_idx, write_idx, queue, queue_type,
+		hw_fence_client->skip_txq_wr_idx ? "true" : "false");
 
 	/* Check queue to make sure message will fit */
 	q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
@@ -351,7 +359,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
 	wmb();
 
 	/* update the write index */
-	writel_relaxed(to_write_idx, &hfi_header->write_index);
+	writel_relaxed(to_write_idx, wr_ptr);
 
 	/* update memory for the index */
 	wmb();

+ 38 - 22
hw_fence/src/hw_fence_drv_utils.c

@@ -58,9 +58,9 @@
  * struct hw_fence_client_types - Table describing all supported client types, used to parse
  *                                device-tree properties related to client queue size.
  *
- * The fields name, init_id, and max_clients_num are constants. Default values for clients_num and
- * queues_num are provided in this table, and clients_num, queues_num, and queue_entries can be read
- * from device-tree.
+ * The fields name, init_id, and max_clients_num are constants. Default values for clients_num,
+ * queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num,
+ * queue_entries, and skip_txq_wr_idx can be read from device-tree.
  *
  * If a value for queue entries is not parsed for the client type, then the default number of client
  * queue entries (parsed from device-tree) is used.
@@ -68,29 +68,30 @@
  * Notes:
  * 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
  * 2. Each HW Fence client ID must be described by one of the client types in this table.
- * 3. A new client type must set: name, init_id, max_clients_num, clients_num, and queues_num.
+ * 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and
+ *    skip_txq_wr_idx.
  * 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must
  *    be incremented as appropriate for new client types.
  */
 struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
 	{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
-		HW_FENCE_CLIENT_QUEUES, 0, 0},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
 	{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
-		HW_FENCE_CLIENT_QUEUES, 0, 0},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
 	{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
-		HW_FENCE_CLIENT_QUEUES, 0, 0},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
 	{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0,
-		HW_FENCE_CLIENT_QUEUES, 0, 0},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
 	{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0,
-		HW_FENCE_CLIENT_QUEUES, 0, 0},
-	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
-	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0},
+		HW_FENCE_CLIENT_QUEUES, 0, 0, false},
+	{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
+	{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
 };
 
 static void _lock(uint64_t *wait)
@@ -594,13 +595,13 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 	struct hw_fence_client_type_desc *desc)
 {
 	char name[31];
-	u32 tmp[3];
+	u32 tmp[4];
 	u32 queue_size;
 	int ret;
 
 	/* parse client queue property from device-tree */
 	snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
-	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 3);
+	ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
 	if (ret) {
 		HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
 			ret);
@@ -609,6 +610,12 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 		desc->clients_num = tmp[0];
 		desc->queues_num = tmp[1];
 		desc->queue_entries = tmp[2];
+
+		if (tmp[3] > 1) {
+			HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]);
+			return -EINVAL;
+		}
+		desc->skip_txq_wr_idx = tmp[3];
 	}
 
 	if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
@@ -642,8 +649,9 @@ static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_da
 		return -EINVAL;
 	}
 
-	HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu\n", desc->name,
-		desc->clients_num, desc->queues_num, desc->queue_entries, desc->mem_size);
+	HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n",
+		desc->name, desc->clients_num, desc->queues_num, desc->queue_entries,
+		desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false");
 
 	return 0;
 }
@@ -700,7 +708,7 @@ static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
 			drv_data->hw_fence_client_queue_size[client_id] =
 				(struct hw_fence_client_queue_size_desc)
 				{desc->queues_num, desc->queue_entries, desc->mem_size,
-				start_offset};
+				start_offset, desc->skip_txq_wr_idx};
 			HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
 				desc->name, client_id_ext, client_id, start_offset);
 			start_offset += desc->mem_size;
@@ -919,3 +927,11 @@ enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver
 
 	return client_id_priv;
 }
+
+bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id)
+{
+	if (!drv_data || client_id >= drv_data->clients_num)
+		return false;
+
+	return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx;
+}

+ 2 - 0
hw_fence/src/msm_hw_fence.c

@@ -93,6 +93,8 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
 	}
 
 	hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
+	hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data,
+		client_id);
 
 	/* Alloc Client HFI Headers and Queues */
 	ret = hw_fence_alloc_client_resources(hw_fence_drv_data,