Change-Id: I59178de15716e1bc15d1a462306ec849fab7deb9
Этот коммит содержится в:
Linux Build Service Account
2023-01-10 11:06:18 -08:00
родитель 83808f0957 55082654a9
Коммит 206f215eab
9 изменённых файлов: 676 добавлений и 144 удалений

Просмотреть файл

@@ -85,7 +85,7 @@ struct hw_fence_out_clients_map {
* The index of this struct must match the enum hw_fence_client_id
*/
static const struct hw_fence_out_clients_map
dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_MAX] = {
dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */

Просмотреть файл

@@ -9,12 +9,32 @@
/* ipc clients virtual client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8
#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9
#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11
#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12
#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25
#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128
#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129
#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130
#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131
#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132
#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133
#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134
#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135
/* ipc clients physical client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3
#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4
#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5
#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8
#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9
#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11
#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12
#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13
#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14
#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15
#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16
#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17
#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_LAHAINA 2
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_WAIPIO 1

Просмотреть файл

@@ -35,8 +35,8 @@
#define HW_FENCE_HFI_CTRL_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CTRL_QUEUES))
#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE * HW_FENCE_CLIENT_QUEUES))
#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) (HW_FENCE_HFI_TABLE_HEADER_SIZE + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE * queues_num))
/*
* Max Payload size is the biggest size of the message that we can have in the CTRL queue
@@ -48,8 +48,8 @@
#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE
#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload))
/* Locks area for all the clients */
#define HW_FENCE_MEM_LOCKS_SIZE (sizeof(u64) * (HW_FENCE_CLIENT_MAX - 1))
/* Locks area for all clients with RxQ */
#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (sizeof(u64) * rxq_clients_num)
#define HW_FENCE_TX_QUEUE 1
#define HW_FENCE_RX_QUEUE 2
@@ -165,7 +165,9 @@ enum payload_type {
/**
* struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
* @client_id: id of the client
* @client_id: internal client_id used within HW fence driver; index into the clients struct
* @client_id_ext: external client_id, equal to client_id except for clients with configurable
* number of sub-clients (e.g. ife clients)
* @mem_descriptor: hfi header memory descriptor
* @queues: queues descriptor
* @ipc_signal_id: id of the signal to be triggered for this client
@@ -173,11 +175,14 @@ enum payload_type {
* @ipc_client_pid: physical id of the ipc client for this hw fence driver client
* @update_rxq: bool to indicate if client uses rx-queue
* @send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
* @wait_queue: wait queue for the validation clients
* @val_signal: doorbell flag to signal the validation clients in the wait queue
*/
struct msm_hw_fence_client {
enum hw_fence_client_id client_id;
enum hw_fence_client_id client_id_ext;
struct msm_hw_fence_mem_addr mem_descriptor;
struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
int ipc_signal_id;
@@ -185,6 +190,7 @@ struct msm_hw_fence_client {
int ipc_client_pid;
bool update_rxq;
bool send_ipc;
bool skip_txq_wr_idx;
#if IS_ENABLED(CONFIG_DEBUG_FS)
wait_queue_head_t wait_queue;
atomic_t val_signal;
@@ -230,6 +236,25 @@ struct msm_hw_fence_dbg_data {
u64 lock_wake_cnt;
};
/**
* struct hw_fence_client_queue_size_desc - Structure holding client queue properties for a client.
*
* @queues_num: number of client queues
* @queue_entries: number of queue entries per client queue
* @mem_size: size of memory allocated for client queues
* @start_offset: start offset of client queue memory region, from beginning of carved-out memory
* allocation for hw fence driver
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
*/
struct hw_fence_client_queue_size_desc {
u32 queues_num; /* number of client queues (1: tx only, 2: tx and rx) */
u32 queue_entries; /* number of queue entries per client queue */
u32 mem_size; /* size of memory allocated for this client's queue(s) */
u32 start_offset; /* offset of client queue memory from start of the hw fence carve-out */
bool skip_txq_wr_idx; /* if true, tx queue write_index is not updated; hfi_header->tx_wm is used instead */
};
/**
* struct hw_fence_driver_data - Structure holding internal hw-fence driver data
*
@@ -240,8 +265,9 @@ struct msm_hw_fence_dbg_data {
* @hw_fence_queue_entries: total number of entries that can be available in the queue
* @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
* @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
* @hw_fence_client_queue_size: size of the client queue for the payload
* @hw_fence_mem_clients_queues_size: total size of client queues, including: header + rxq + txq
* @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
* @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
* @clients_num: number of supported hw fence clients (configured based on device-tree)
* @hw_fences_tbl: pointer to the hw-fences table
* @hw_fences_tbl_cnt: number of elements in the hw-fence table
* @client_lock_tbl: pointer to the per-client locks table
@@ -257,6 +283,7 @@ struct msm_hw_fence_dbg_data {
* @peer_name: peer name for this carved-out memory
* @rm_nb: hyp resource manager notifier
* @memparcel: memparcel for the allocated memory
* @used_mem_size: total memory size of global table, lock region, and ctrl and client queues
* @db_label: doorbell label
* @rx_dbl: handle to the Rx doorbell
* @debugfs_data: debugfs info
@@ -274,7 +301,7 @@ struct msm_hw_fence_dbg_data {
* @ctl_start_size: size of the ctl_start registers of the display hw (platforms with no dpu-ipc)
* @client_id_mask: bitmask for tracking registered client_ids
* @clients_register_lock: lock to synchronize clients registration and deregistration
* @msm_hw_fence_client: table with the handles of the registered clients
* @clients: table with the handles of the registered clients; size is equal to clients_num
* @vm_ready: flag to indicate if vm has been initialized
* @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized
*/
@@ -291,8 +318,10 @@ struct hw_fence_driver_data {
u32 hw_fence_ctrl_queue_size;
u32 hw_fence_mem_ctrl_queues_size;
/* client queues */
u32 hw_fence_client_queue_size;
u32 hw_fence_mem_clients_queues_size;
struct hw_fence_client_queue_size_desc *hw_fence_client_queue_size;
struct hw_fence_client_type_desc *hw_fence_client_types;
u32 rxq_clients_num;
u32 clients_num;
/* HW Fences Table VA */
struct msm_hw_fence *hw_fences_tbl;
@@ -316,6 +345,7 @@ struct hw_fence_driver_data {
u32 peer_name;
struct notifier_block rm_nb;
u32 memparcel;
u32 used_mem_size;
/* doorbell */
u32 db_label;
@@ -350,7 +380,7 @@ struct hw_fence_driver_data {
struct mutex clients_register_lock;
/* table with registered client handles */
struct msm_hw_fence_client *clients[HW_FENCE_CLIENT_MAX];
struct msm_hw_fence_client **clients;
bool vm_ready;
#ifdef HW_DPU_IPCC

Просмотреть файл

@@ -6,6 +6,24 @@
#ifndef __HW_FENCE_DRV_UTILS_H
#define __HW_FENCE_DRV_UTILS_H
/**
* HW_FENCE_MAX_CLIENT_TYPE_STATIC:
* Total number of client types without configurable number of sub-clients (GFX, DPU, VAL, IPE, VPU)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 5
/**
* HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE:
* Maximum number of client types with configurable number of sub-clients (e.g. IFE)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 8
/**
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX:
* Maximum number of static clients, i.e. clients without configurable numbers of sub-clients
*/
#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0
/**
* enum hw_fence_mem_reserve - Types of reservations for the carved-out memory.
* HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues.
@@ -20,6 +38,33 @@ enum hw_fence_mem_reserve {
HW_FENCE_MEM_RESERVE_CLIENT_QUEUE
};
/**
* struct hw_fence_client_type_desc - Structure holding client type properties, including static
* properties and client queue properties read from device-tree.
*
* @name: name of client type, used to parse properties from device-tree
* @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
* HW_FENCE_CLIENT_ID_CTL0 for DPU clients
* @max_clients_num: maximum number of clients of given client type
* @clients_num: number of clients of given client type
* @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
* two (for both Tx and Rx Queues)
* @queue_entries: number of entries per client queue of given client type
* @mem_size: size of memory allocated for client queue(s) per client
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
*/
struct hw_fence_client_type_desc {
char *name; /* client type name, used to parse matching device-tree properties */
enum hw_fence_client_id init_id; /* first client_id of this type within 'hw_fence_client_id' */
u32 max_clients_num; /* maximum number of clients of this type */
u32 clients_num; /* configured number of clients of this type */
u32 queues_num; /* queues per client: 1 (tx only) or 2 (tx and rx) */
u32 queue_entries; /* entries per client queue of this type */
u32 mem_size; /* memory size for client queue(s) per client */
bool skip_txq_wr_idx; /* if true, tx write_index is skipped; hfi_header->tx_wm is updated instead */
};
/**
* global_atomic_store() - Inter-processor lock
* @drv_data: hw fence driver data
@@ -111,4 +156,34 @@ int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
u32 reset_flags);
/**
* hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver
* from the client_id used externally
*
* Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
* otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if
* provided with client IDs for such clients when support for those clients is not configured in
* device-tree.
*
* @drv_data: hw fence driver data
* @client_id: external client_id to get internal client_id for
*
* Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX
*/
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
enum hw_fence_client_id client_id);
/**
* hw_fence_utils_skips_txq_wr_idx() - Returns bool to indicate if client Tx Queue write_index
* is not updated in hw fence driver. Instead,
* hfi_header->tx_wm tracks where payload is written within
* the queue.
*
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* Returns: true if hw fence driver skips update to client tx queue write_index, false otherwise
*/
bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id);
#endif /* __HW_FENCE_DRV_UTILS_H */

Просмотреть файл

@@ -9,6 +9,13 @@
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/*
* Max size of base table with ipc mappings, with one mapping per client type with configurable
* number of subclients
*/
#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \
HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
/**
* struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
* @ipc_client_id_virt: virtual ipc client id for the hw-fence client.
@@ -36,7 +43,7 @@ struct hw_fence_client_ipc_map {
* To change to a loopback signal instead of GMU, change ctx0 row to use:
* {HW_FENCE_IPC_CLIENT_ID_APPS, 20}.
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_MAX] = {
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/* ctrlq*/
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/* ctx0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 14, false, true},/*ctl0*/
@@ -64,7 +71,7 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_no_dpu[HW_FENCE_CLIENT_M
*
* Note that the index of this struct must match the enum hw_fence_client_id
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = {
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true},/*ctrl q*/
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, false, false},/*ctx0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, true},/* ctl0 */
@@ -81,7 +88,17 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true},/* val4*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true},/* val5*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true},/* val6*/
#else
{0, 0, 0, false, false}, /* val0 */
{0, 0, 0, false, false}, /* val1 */
{0, 0, 0, false, false}, /* val2 */
{0, 0, 0, false, false}, /* val3 */
{0, 0, 0, false, false}, /* val4 */
{0, 0, 0, false, false}, /* val5 */
{0, 0, 0, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true}, /* ipe */
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true}, /* vpu */
};
/**
@@ -90,9 +107,12 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_CLIENT_MAX] = {
* signaled.
* This version is for targets that support dpu client id and IPC v2.
*
* Note that the index of this struct must match the enum hw_fence_client_id
* Note that the index of this struct must match the enum hw_fence_client_id for clients ids less
* than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
* For clients with configurable sub-clients, the index of this struct matches
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX] = {
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true},/*ctrlq */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, false, false},/* ctx0*/
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, true},/* ctl0 */
@@ -109,12 +129,30 @@ struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_CLIENT_MAX]
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true},/* val4*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true},/* val5*/
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true},/* val6*/
#else
{0, 0, 0, false, false}, /* val0 */
{0, 0, 0, false, false}, /* val1 */
{0, 0, 0, false, false}, /* val2 */
{0, 0, 0, false, false}, /* val3 */
{0, 0, 0, false, false}, /* val4 */
{0, 0, 0, false, false}, /* val5 */
{0, 0, 0, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true}, /* ipe */
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true}, /* vpu */
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, true},/* ife0*/
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, true},/* ife1*/
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, true},/* ife2*/
{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, true},/* ife3*/
{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, true},/* ife4*/
{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, true},/* ife5*/
{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, true},/* ife6*/
{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, true},/* ife7*/
};
int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_virt;
@@ -122,7 +160,7 @@ int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32
int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_phys;
@@ -130,7 +168,7 @@ int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_signal_id;
@@ -138,8 +176,8 @@ int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 clien
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
return -EINVAL;
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].update_rxq;
}
@@ -147,7 +185,7 @@ bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int c
bool hw_fence_ipcc_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= HW_FENCE_CLIENT_MAX)
return -EINVAL;
return false;
return drv_data->ipc_clients_table[client_id].send_ipc;
}
@@ -164,6 +202,26 @@ static inline char *_get_ipc_phys_client_name(u32 client_id)
return "GPU_PID";
case HW_FENCE_IPC_CLIENT_ID_DPU_PID:
return "DPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IPE_PID:
return "IPE_PID";
case HW_FENCE_IPC_CLIENT_ID_VPU_PID:
return "VPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_PID:
return "IFE0_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_PID:
return "IFE1_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_PID:
return "IFE2_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_PID:
return "IFE3_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_PID:
return "IFE4_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_PID:
return "IFE5_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_PID:
return "IFE6_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_PID:
return "IFE7_PID";
}
return "UNKNOWN_PID";
@@ -181,6 +239,26 @@ static inline char *_get_ipc_virt_client_name(u32 client_id)
return "GPU_VID";
case HW_FENCE_IPC_CLIENT_ID_DPU_VID:
return "DPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IPE_VID:
return "IPE_VID";
case HW_FENCE_IPC_CLIENT_ID_VPU_VID:
return "VPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_VID:
return "IFE0_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_VID:
return "IFE1_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_VID:
return "IFE2_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_VID:
return "IFE3_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_VID:
return "IFE4_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_VID:
return "IFE5_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_VID:
return "IFE6_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_VID:
return "IFE7_VID";
}
return "UNKNOWN_VID";
@@ -208,6 +286,46 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
wmb();
}
static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_ipc_map *base_table)
{
int i, j, map_idx;
size_t size;
size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map);
drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL);
if (!drv_data->ipc_clients_table)
return -ENOMEM;
/* copy mappings for static hw fence clients */
size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map);
memcpy(drv_data->ipc_clients_table, base_table, size);
/* initialize mappings for ipc clients with configurable number of hw fence clients */
map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX;
for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) {
int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i;
int clients_num = drv_data->hw_fence_client_types[client_type].clients_num;
for (j = 0; j < clients_num; j++) {
/* this should never happen if drv_data->clients_num is correct */
if (map_idx >= drv_data->clients_num) {
HWFNC_ERR("%s clients_num:%lu exceeds drv_data->clients_num:%lu\n",
drv_data->hw_fence_client_types[client_type].name,
clients_num, drv_data->clients_num);
return -EINVAL;
}
drv_data->ipc_clients_table[map_idx] =
base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i];
drv_data->ipc_clients_table[map_idx].ipc_signal_id = j;
map_idx++;
}
}
return 0;
}
/**
* _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
* according to the ipcc hw revision.
@@ -216,6 +334,8 @@ void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
*/
static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
{
int ret = 0;
switch (hwrev) {
case HW_FENCE_IPCC_HW_REV_100:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
@@ -241,15 +361,16 @@ static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32
case HW_FENCE_IPCC_HW_REV_203:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE; /* Fence */
drv_data->ipc_clients_table = hw_fence_clients_ipc_map_v2;
drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */
ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
hw_fence_clients_ipc_map_v2);
HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n");
break;
default:
return -1;
}
return 0;
return ret;
}
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
@@ -320,7 +441,7 @@ int hw_fence_ipcc_enable_dpu_signaling(struct hw_fence_driver_data *drv_data)
HWFNC_DBG_H("Initialize dpu signals\n");
/* Enable Client-Signal pairs from DPU (25) to APPS(NS) (8) */
for (i = 0; i < HW_FENCE_CLIENT_MAX; i++) {
for (i = 0; i < drv_data->clients_num; i++) {
hw_fence_client = &drv_data->ipc_clients_table[i];
/* skip any client that is not a dpu client */

Просмотреть файл

@@ -46,8 +46,14 @@ static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
break;
case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE;
queue_size = drv_data->hw_fence_client_queue_size;
if (client_id >= drv_data->clients_num) {
HWFNC_ERR("Invalid client_id: %d\n", client_id);
return -EINVAL;
}
headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num);
queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
drv_data->hw_fence_client_queue_size[client_id].queue_entries;
payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
break;
default:
@@ -242,10 +248,13 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
bool lock_client = false;
u32 lock_idx;
u64 timestamp;
u32 *wr_ptr;
int ret = 0;
if (queue_type >= HW_FENCE_CLIENT_QUEUES) {
HWFNC_ERR("Invalid queue type:%s\n", queue_type);
if (queue_type >=
drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) {
HWFNC_ERR("Invalid queue type:%s client_id:%d\n", queue_type,
hw_fence_client->client_id);
return -EINVAL;
}
@@ -261,6 +270,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
return -EINVAL;
}
/* if skipping update txq wr_index, then use hfi_header->tx_wm instead */
if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx)
wr_ptr = &hfi_header->tx_wm;
else
wr_ptr = &hfi_header->write_index;
/*
* We need to lock the client if there is an Rx Queue update, since that
* is the only time when HW Fence driver can have a race condition updating
@@ -286,11 +301,12 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
/* Get read and write index */
read_idx = readl_relaxed(&hfi_header->read_index);
write_idx = readl_relaxed(&hfi_header->write_index);
write_idx = readl_relaxed(wr_ptr);
HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n",
hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
read_idx, write_idx, queue, queue_type);
HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
read_idx, write_idx, queue, queue_type,
hw_fence_client->skip_txq_wr_idx ? "true" : "false");
/* Check queue to make sure message will fit */
q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
@@ -343,7 +359,7 @@ int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
wmb();
/* update the write index */
writel_relaxed(to_write_idx, &hfi_header->write_index);
writel_relaxed(to_write_idx, wr_ptr);
/* update memory for the index */
wmb();
@@ -526,7 +542,8 @@ int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
/* Init client queues */
ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
&hw_fence_client->mem_descriptor, hw_fence_client->queues,
HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id);
drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num,
hw_fence_client->client_id);
if (ret) {
HWFNC_ERR("Failure to init the queue for client:%d\n",
hw_fence_client->client_id);
@@ -549,12 +566,12 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
/*
* Initialize IPCC Signals for this client
*
* NOTE: Fore each Client HW-Core, the client drivers might be the ones making
* NOTE: For each Client HW-Core, the client drivers might be the ones making
* it's own initialization (in case that any hw-sequence must be enforced),
* however, if that is not the case, any per-client ipcc init to enable the
* signaling, can go here.
*/
switch (hw_fence_client->client_id) {
switch ((int)hw_fence_client->client_id) {
case HW_FENCE_CLIENT_ID_CTX0:
/* nothing to initialize for gpu client */
break;
@@ -587,6 +604,16 @@ int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
}
#endif /* HW_DPU_IPCC */
break;
case HW_FENCE_CLIENT_ID_IPE:
/* nothing to initialize for IPE client */
break;
case HW_FENCE_CLIENT_ID_VPU:
/* nothing to initialize for VPU client */
break;
case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 +
MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
/* nothing to initialize for IFE clients */
break;
default:
HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id);
ret = -EINVAL;
@@ -1388,7 +1415,7 @@ static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
u64 client_data = 0;
/* signal with an error all the waiting clients for this fence */
for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) {
for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) {
if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
hw_fence_wait_client = drv_data->clients[wait_client_id];
data_id = hw_fence_get_client_data_id(wait_client_id);
@@ -1457,6 +1484,12 @@ enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id
case HW_FENCE_CLIENT_ID_VAL1:
data_id = HW_FENCE_CLIENT_DATA_ID_VAL1;
break;
case HW_FENCE_CLIENT_ID_IPE:
data_id = HW_FENCE_CLIENT_DATA_ID_IPE;
break;
case HW_FENCE_CLIENT_ID_VPU:
data_id = HW_FENCE_CLIENT_DATA_ID_VPU;
break;
default:
data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA;
break;

Просмотреть файл

@@ -15,6 +15,85 @@
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/**
* MAX_CLIENT_QUEUE_MEM_SIZE:
* Maximum memory size for client queues of a hw fence client.
*/
#define MAX_CLIENT_QUEUE_MEM_SIZE 0x100000
/**
* HW_FENCE_MAX_CLIENT_TYPE:
* Total number of client types with and without configurable number of sub-clients
*/
#define HW_FENCE_MAX_CLIENT_TYPE (HW_FENCE_MAX_CLIENT_TYPE_STATIC + \
HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
/**
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX:
* Maximum number of static clients, i.e. clients without configurable numbers of sub-clients
*/
#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IFE0
/**
* HW_FENCE_MIN_RXQ_CLIENTS:
* Minimum number of static hw fence clients with rxq
*/
#define HW_FENCE_MIN_RXQ_CLIENTS HW_FENCE_CLIENT_ID_VAL6
/**
* HW_FENCE_MIN_RXQ_CLIENT_TYPE:
* Minimum number of static hw fence client types with rxq (GFX, DPU, VAL)
*/
#define HW_FENCE_MIN_RXQ_CLIENT_TYPE 3
/* Maximum number of clients for each client type */
#define HW_FENCE_CLIENT_TYPE_MAX_GPU 1
#define HW_FENCE_CLIENT_TYPE_MAX_DPU 6
#define HW_FENCE_CLIENT_TYPE_MAX_VAL 7
#define HW_FENCE_CLIENT_TYPE_MAX_IPE 1
#define HW_FENCE_CLIENT_TYPE_MAX_VPU 1
#define HW_FENCE_CLIENT_TYPE_MAX_IFE 32
/**
* struct hw_fence_client_types - Table describing all supported client types, used to parse
* device-tree properties related to client queue size.
*
* The fields name, init_id, and max_clients_num are constants. Default values for clients_num,
* queues_num, and skip_txq_wr_idx are provided in this table, and clients_num, queues_num,
* queue_entries, and skip_txq_wr_idx can be read from device-tree.
*
* If a value for queue entries is not parsed for the client type, then the default number of client
* queue entries (parsed from device-tree) is used.
*
* Notes:
* 1. Client types must be in the same order as client_ids within the enum 'hw_fence_client_id'.
* 2. Each HW Fence client ID must be described by one of the client types in this table.
* 3. A new client type must set: name, init_id, max_clients_num, clients_num, queues_num, and
* skip_txq_wr_idx.
* 4. Either constant HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE or HW_FENCE_MAX_CLIENT_TYPE_STATIC must
* be incremented as appropriate for new client types.
*/
struct hw_fence_client_type_desc hw_fence_client_types[HW_FENCE_MAX_CLIENT_TYPE] = {
/* {name, init_id, max_clients_num, clients_num, queues_num, queue_entries, mem_size, skip_txq_wr_idx} */
{"gpu", HW_FENCE_CLIENT_ID_CTX0, HW_FENCE_CLIENT_TYPE_MAX_GPU, HW_FENCE_CLIENT_TYPE_MAX_GPU,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
{"dpu", HW_FENCE_CLIENT_ID_CTL0, HW_FENCE_CLIENT_TYPE_MAX_DPU, HW_FENCE_CLIENT_TYPE_MAX_DPU,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
{"val", HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_TYPE_MAX_VAL, HW_FENCE_CLIENT_TYPE_MAX_VAL,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
/* ipe/vpu/ife default clients_num to 0: disabled unless configured in device-tree */
{"ipe", HW_FENCE_CLIENT_ID_IPE, HW_FENCE_CLIENT_TYPE_MAX_IPE, 0,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
{"vpu", HW_FENCE_CLIENT_ID_VPU, HW_FENCE_CLIENT_TYPE_MAX_VPU, 0,
HW_FENCE_CLIENT_QUEUES, 0, 0, false},
/* ife clients use one queue (tx only) and skip the tx queue write_index update */
{"ife0", HW_FENCE_CLIENT_ID_IFE0, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife1", HW_FENCE_CLIENT_ID_IFE1, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife2", HW_FENCE_CLIENT_ID_IFE2, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife3", HW_FENCE_CLIENT_ID_IFE3, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife4", HW_FENCE_CLIENT_ID_IFE4, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife5", HW_FENCE_CLIENT_ID_IFE5, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife6", HW_FENCE_CLIENT_ID_IFE6, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
{"ife7", HW_FENCE_CLIENT_ID_IFE7, HW_FENCE_CLIENT_TYPE_MAX_IFE, 0, 1, 0, 0, true},
};
static void _lock(uint64_t *wait)
{
#if defined(__aarch64__)
@@ -399,6 +478,11 @@ int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *drv_data)
return -ENXIO;
}
drv_data->size = resource_size(&drv_data->res);
if (drv_data->size < drv_data->used_mem_size) {
HWFNC_ERR("0x%x size of carved-out memory region is less than required size:0x%x\n",
drv_data->size, drv_data->used_mem_size);
return -ENOMEM;
}
HWFNC_DBG_INIT("io_mem_base:0x%x start:0x%x end:0x%x size:0x%x name:%s\n",
drv_data->io_mem_base, drv_data->res.start,
@@ -455,26 +539,32 @@ int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
case HW_FENCE_MEM_RESERVE_LOCKS_REGION:
/* Locks region starts at the end of the ctrl queues */
start_offset = drv_data->hw_fence_mem_ctrl_queues_size;
*size = HW_FENCE_MEM_LOCKS_SIZE;
*size = HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
break;
case HW_FENCE_MEM_RESERVE_TABLE:
/* HW Fence table starts at the end of the Locks region */
start_offset = drv_data->hw_fence_mem_ctrl_queues_size + HW_FENCE_MEM_LOCKS_SIZE;
start_offset = drv_data->hw_fence_mem_ctrl_queues_size +
HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num);
*size = drv_data->hw_fence_mem_fences_table_size;
break;
case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
if (client_id >= HW_FENCE_CLIENT_MAX) {
if (client_id >= drv_data->clients_num) {
HWFNC_ERR("unexpected client_id:%d\n", client_id);
ret = -EINVAL;
goto exit;
}
start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
HW_FENCE_MEM_LOCKS_SIZE +
drv_data->hw_fence_mem_fences_table_size) +
((client_id - 1) * drv_data->hw_fence_mem_clients_queues_size);
*size = drv_data->hw_fence_mem_clients_queues_size;
start_offset = drv_data->hw_fence_client_queue_size[client_id].start_offset;
*size = drv_data->hw_fence_client_queue_size[client_id].mem_size;
/*
* If this error occurs when client should be valid, check that support for this
* client has been configured in device-tree properties.
*/
if (!*size) {
HWFNC_ERR("invalid client_id:%d not reserved client queue\n", client_id);
ret = -EINVAL;
}
break;
default:
HWFNC_ERR("Invalid mem reserve type:%d\n", type);
@@ -501,9 +591,138 @@ exit:
return ret;
}
static int _parse_client_queue_dt_props_indv(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_type_desc *desc)
{
char name[31];
u32 tmp[4];
u32 queue_size;
int ret;
/* parse client queue property from device-tree */
snprintf(name, sizeof(name), "qcom,hw-fence-client-type-%s", desc->name);
ret = of_property_read_u32_array(drv_data->dev->of_node, name, tmp, 4);
if (ret) {
HWFNC_DBG_INIT("missing %s client queue entry or invalid ret:%d\n", desc->name,
ret);
desc->queue_entries = drv_data->hw_fence_queue_entries;
} else {
desc->clients_num = tmp[0];
desc->queues_num = tmp[1];
desc->queue_entries = tmp[2];
if (tmp[3] > 1) {
HWFNC_ERR("%s invalid skip_txq_wr_idx prop:%lu\n", desc->name, tmp[3]);
return -EINVAL;
}
desc->skip_txq_wr_idx = tmp[3];
}
if (desc->clients_num > desc->max_clients_num || !desc->queues_num ||
desc->queues_num > HW_FENCE_CLIENT_QUEUES || !desc->queue_entries) {
HWFNC_ERR("%s invalid dt: clients_num:%lu queues_num:%lu, queue_entries:%lu\n",
desc->name, desc->clients_num, desc->queues_num, desc->queue_entries);
return -EINVAL;
}
/* compute mem_size */
if (desc->queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
HWFNC_ERR("%s client queue entries:%lu will overflow client queue size\n",
desc->name, desc->queue_entries);
return -EINVAL;
}
queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
if (queue_size >= ((U32_MAX & PAGE_MASK) -
HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num)) / desc->queues_num) {
HWFNC_ERR("%s client queue size:%lu will overflow client queue mem size\n",
desc->name, queue_size);
return -EINVAL;
}
desc->mem_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE(desc->queues_num) +
(queue_size * desc->queues_num));
if (desc->mem_size > MAX_CLIENT_QUEUE_MEM_SIZE) {
HWFNC_ERR("%s client queue mem_size:%lu greater than max client queue size:%lu\n",
desc->name, desc->mem_size, MAX_CLIENT_QUEUE_MEM_SIZE);
return -EINVAL;
}
HWFNC_DBG_INIT("%s: clients=%lu q_num=%lu q_entries=%lu mem_sz=%lu skips_wr_ptr:%s\n",
desc->name, desc->clients_num, desc->queues_num, desc->queue_entries,
desc->mem_size, desc->skip_txq_wr_idx ? "true" : "false");
return 0;
}
/*
 * _parse_client_queue_dt_props() - parse queue properties for every client type
 * and lay out the per-client queue memory map.
 *
 * For each entry of the global hw_fence_client_types[] table this parses the
 * per-type device-tree properties, then derives:
 *  - rxq_clients_num: HW_FENCE_MIN_RXQ_CLIENTS plus every client at or above
 *    HW_FENCE_MIN_RXQ_CLIENT_TYPE that has both tx and rx queues;
 *  - clients_num: all static client slots plus the configurable clients
 *    discovered in device-tree;
 *  - hw_fence_client_queue_size[]: one size/offset descriptor per internal
 *    client id, with start_offset assigned sequentially after the ctrl
 *    queues, locks region and fences table.
 *
 * Returns 0 on success, negative errno on parse failure or allocation error.
 */
static int _parse_client_queue_dt_props(struct hw_fence_driver_data *drv_data)
{
	struct hw_fence_client_type_desc *desc;
	int i, j, ret;
	u32 start_offset;
	size_t size;
	int configurable_clients_num = 0;

	drv_data->rxq_clients_num = HW_FENCE_MIN_RXQ_CLIENTS;
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		ret = _parse_client_queue_dt_props_indv(drv_data, desc);
		if (ret) {
			HWFNC_ERR("failed to initialize %s client queue size properties\n",
				desc->name);
			return ret;
		}

		/* only clients with both queues (tx and rx) contribute to the rxq count */
		if (i >= HW_FENCE_MIN_RXQ_CLIENT_TYPE &&
				desc->queues_num == HW_FENCE_CLIENT_QUEUES)
			drv_data->rxq_clients_num += desc->clients_num;

		if (i >= HW_FENCE_MAX_CLIENT_TYPE_STATIC)
			configurable_clients_num += desc->clients_num;
	}

	/* store client type descriptors for configurable client indexing logic */
	drv_data->hw_fence_client_types = hw_fence_client_types;

	/* clients and size desc are allocated for all static clients regardless of device-tree */
	drv_data->clients_num = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + configurable_clients_num;

	/* allocate memory for client queue size descriptors */
	size = drv_data->clients_num * sizeof(struct hw_fence_client_queue_size_desc);
	drv_data->hw_fence_client_queue_size = kzalloc(size, GFP_KERNEL);
	if (!drv_data->hw_fence_client_queue_size)
		return -ENOMEM;

	/*
	 * Initialize client queue size desc for each client; queues start on the
	 * first page boundary after ctrl queues + locks region + fences table.
	 */
	start_offset = PAGE_ALIGN(drv_data->hw_fence_mem_ctrl_queues_size +
		HW_FENCE_MEM_LOCKS_SIZE(drv_data->rxq_clients_num) +
		drv_data->hw_fence_mem_fences_table_size);
	for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE; i++) {
		desc = &hw_fence_client_types[i];
		for (j = 0; j < desc->clients_num; j++) {
			/* map the sparse external client id onto the consecutive internal id */
			enum hw_fence_client_id client_id_ext = desc->init_id + j;
			enum hw_fence_client_id client_id =
				hw_fence_utils_get_client_id_priv(drv_data, client_id_ext);

			drv_data->hw_fence_client_queue_size[client_id] =
				(struct hw_fence_client_queue_size_desc)
				{desc->queues_num, desc->queue_entries, desc->mem_size,
				start_offset, desc->skip_txq_wr_idx};
			HWFNC_DBG_INIT("%s client_id_ext:%lu client_id:%lu start_offset:%lu\n",
				desc->name, client_id_ext, client_id, start_offset);
			start_offset += desc->mem_size;
		}
	}
	/* total carveout consumed; checked later against the carved-out region size */
	drv_data->used_mem_size = start_offset;

	return 0;
}
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
{
int ret;
size_t size;
u32 val = 0;
ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-table-entries", &val);
@@ -549,29 +768,26 @@ int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data)
/* clients queues init */
if (drv_data->hw_fence_queue_entries >= U32_MAX / HW_FENCE_CLIENT_QUEUE_PAYLOAD) {
HWFNC_ERR("queue entries:%lu will overflow client queue size\n",
drv_data->hw_fence_queue_entries);
ret = _parse_client_queue_dt_props(drv_data);
if (ret) {
HWFNC_ERR("failed to parse client queue properties\n");
return -EINVAL;
}
drv_data->hw_fence_client_queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
drv_data->hw_fence_queue_entries;
if (drv_data->hw_fence_client_queue_size >= ((U32_MAX & PAGE_MASK) -
HW_FENCE_HFI_CLIENT_HEADERS_SIZE) / HW_FENCE_CLIENT_QUEUES) {
HWFNC_ERR("queue size:%lu will overflow client queue mem size\n",
drv_data->hw_fence_client_queue_size);
return -EINVAL;
}
drv_data->hw_fence_mem_clients_queues_size = PAGE_ALIGN(HW_FENCE_HFI_CLIENT_HEADERS_SIZE +
(HW_FENCE_CLIENT_QUEUES * drv_data->hw_fence_client_queue_size));
/* allocate clients */
size = drv_data->clients_num * sizeof(struct msm_hw_fence_client *);
drv_data->clients = kzalloc(size, GFP_KERNEL);
if (!drv_data->clients)
return -ENOMEM;
HWFNC_DBG_INIT("table: entries=%lu mem_size=%lu queue: entries=%lu\b",
drv_data->hw_fence_table_entries, drv_data->hw_fence_mem_fences_table_size,
drv_data->hw_fence_queue_entries);
HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu clients queues: size=%lu mem_size=%lu\b",
drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size,
drv_data->hw_fence_client_queue_size, drv_data->hw_fence_mem_clients_queues_size);
HWFNC_DBG_INIT("ctrl queue: size=%lu mem_size=%lu\b",
drv_data->hw_fence_ctrl_queue_size, drv_data->hw_fence_mem_ctrl_queues_size);
HWFNC_DBG_INIT("clients_num: %lu, total_mem_size:%lu\n", drv_data->clients_num,
drv_data->used_mem_size);
return 0;
}
@@ -683,3 +899,39 @@ int hw_fence_utils_map_ctl_start(struct hw_fence_driver_data *drv_data)
return 0;
}
/*
 * hw_fence_utils_get_client_id_priv() - map an external client id to the
 * driver's internal (consecutive) client id.
 *
 * Static clients (below HW_FENCE_MAX_STATIC_CLIENTS_INDEX) map 1:1. For
 * configurable clients, the external enum reserves a fixed stride of
 * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT ids per client type; internally only the
 * clients_num actually enabled per type (from device-tree) are packed
 * consecutively after the static range.
 *
 * Returns the internal client id, or HW_FENCE_CLIENT_MAX if the offset within
 * the type exceeds the number of configured sub-clients.
 */
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
	enum hw_fence_client_id client_id)
{
	int i, client_type, offset;
	enum hw_fence_client_id client_id_priv;

	if (client_id < HW_FENCE_MAX_STATIC_CLIENTS_INDEX)
		return client_id;

	/* consolidate external 'hw_fence_client_id' enum into consecutive internal client IDs */
	client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC +
		(client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) /
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;
	offset = (client_id - HW_FENCE_MAX_STATIC_CLIENTS_INDEX) %
		MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT;

	/*
	 * invalid client id out of range of supported configurable sub-clients
	 * NOTE(review): client_type is not bounds-checked here against
	 * HW_FENCE_MAX_CLIENT_TYPE — presumably callers guarantee
	 * client_id < HW_FENCE_CLIENT_MAX keeps it in range; confirm.
	 */
	if (offset >= drv_data->hw_fence_client_types[client_type].clients_num)
		return HW_FENCE_CLIENT_MAX;

	/* internal id = static range + enabled clients of all earlier configurable types */
	client_id_priv = HW_FENCE_MAX_STATIC_CLIENTS_INDEX + offset;

	for (i = HW_FENCE_MAX_CLIENT_TYPE_STATIC; i < client_type; i++)
		client_id_priv += drv_data->hw_fence_client_types[i].clients_num;

	return client_id_priv;
}
/*
 * hw_fence_utils_skips_txq_wr_idx() - query whether a client's tx queue skips
 * the write-index update (per its queue size descriptor).
 * @drv_data: hw fence driver data, may be NULL
 * @client_id: internal client id indexing hw_fence_client_queue_size[]
 *
 * Returns the client's skip_txq_wr_idx flag, or false for a NULL drv_data or
 * an out-of-range id.
 */
bool hw_fence_utils_skips_txq_wr_idx(struct hw_fence_driver_data *drv_data, int client_id)
{
	/*
	 * client_id is signed and indexes the descriptor array below; reject
	 * negative ids explicitly rather than relying on the signedness of
	 * clients_num in the comparison.
	 */
	if (!drv_data || client_id < 0 || client_id >= drv_data->clients_num)
		return false;

	return drv_data->hw_fence_client_queue_size[client_id].skip_txq_wr_idx;
}

Просмотреть файл

@@ -71,16 +71,16 @@ struct hw_fence_sync_create_data {
/**
* struct hw_fence_array_sync_create_data - data used in creating multiple fences.
* @seqno: array of sequence numbers used to create fences.
* @num_fences: number of fences to be created.
* @fence: return the fd of the new sync_file with the created fence.
* @hash: array of fence hash
* @seqno: sequence number used to create fence array.
* @num_fences: number of fence fds received.
* @fences: array of fence fds.
* @fence_array_fd: fd of fence array.
*/
struct hw_fence_array_sync_create_data {
u64 seqno[HW_FENCE_ARRAY_SIZE];
u64 seqno;
int num_fences;
__s32 fence;
u64 hash[HW_FENCE_ARRAY_SIZE];
u64 fences[HW_FENCE_ARRAY_SIZE];
__s32 fence_array_fd;
};
/**
@@ -132,9 +132,9 @@ static bool _is_valid_client(struct hw_sync_obj *obj)
if (!obj)
return false;
if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id >= HW_FENCE_CLIENT_MAX) {
if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX);
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return false;
}
@@ -151,9 +151,9 @@ static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg)
if (!obj)
return -EINVAL;
if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id >= HW_FENCE_CLIENT_MAX) {
if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_MAX);
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return -EINVAL;
}
@@ -343,16 +343,22 @@ static long hw_sync_ioctl_destroy_fence(struct hw_sync_obj *obj, unsigned long a
return 0;
}
/*
 * _put_child_fences() - error-unwind helper: drop one reference on each child
 * fence from index i down to 0.
 *
 * Bug fix: the loop previously called dma_fence_put(fences[i]) on every
 * iteration, releasing the same fence (i+1) times and never the others —
 * a refcount imbalance leading to premature free of fences[i] and leaks of
 * the rest. Put each fence by the loop index instead.
 */
static void _put_child_fences(int i, struct dma_fence **fences)
{
	int fence_idx;

	for (fence_idx = i; fence_idx >= 0 ; fence_idx--)
		dma_fence_put(fences[fence_idx]);
}
static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg)
{
struct dma_fence_array *fence_array;
struct hw_fence_array_sync_create_data data;
struct dma_fence **fences = NULL;
struct msm_hw_fence_create_params params;
struct sync_file *sync_file;
spinlock_t **fence_lock = NULL;
int num_fences, i, fd, ret;
u64 hash;
struct hw_dma_fence *fence;
if (!_is_valid_client(obj)) {
return -EINVAL;
@@ -370,80 +376,43 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l
num_fences, HW_FENCE_ARRAY_SIZE);
return -EINVAL;
}
fence_lock = kcalloc(num_fences, sizeof(*fence_lock), GFP_KERNEL);
if (!fence_lock)
return -ENOMEM;
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences) {
kfree(fence_lock);
return -ENOMEM;
}
/*
* Create the array of dma fences
* This API takes seqno[num_fences] as the seqno for the fence-array
* and from 0 to (num_fences - 1) for the fences in the array.
*/
for (i = 0; i < num_fences; i++) {
struct hw_dma_fence *dma_fence;
fence_lock[i] = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
if (!fence_lock[i]) {
_cleanup_fences(i, fences, fence_lock);
return -ENOMEM;
fd = data.fences[i];
if (fd <= 0) {
kfree(fences);
return -EINVAL;
}
dma_fence = kzalloc(sizeof(*dma_fence), GFP_KERNEL);
if (!dma_fence) {
_cleanup_fences(i, fences, fence_lock);
return -ENOMEM;
fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
_put_child_fences(i-1, fences);
kfree(fences);
return -EINVAL;
}
fences[i] = &dma_fence->base;
spin_lock_init(fence_lock[i]);
dma_fence_init(fences[i], &hw_fence_dbg_ops, fence_lock[i],
obj->context, data.seqno[i]);
fences[i] = &fence->base;
}
/* create the fence array from array of dma fences */
fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno[i], 0);
fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0);
if (!fence_array) {
HWFNC_ERR("Error creating fence_array\n");
_cleanup_fences(num_fences - 1, fences, fence_lock);
/* decrease the refcount incremented for each child fences */
for (i = 0; i < num_fences; i++)
dma_fence_put(fences[i]);
kfree(fences);
return -EINVAL;
}
/* create hw fences */
for (i = 0; i < num_fences; i++) {
params.fence = fences[i];
params.handle = &hash;
ret = msm_hw_fence_create(obj->client_handle, &params);
if (ret) {
HWFNC_ERR("Error creating HW fence\n");
dma_fence_put(&fence_array->base);
/*
* free array of pointers, no need to call kfree in 'fences',
* since that is released from the fence-array release api
*/
kfree(fence_lock);
kfree(fence_array);
return -EINVAL;
}
/* keep handle in dma_fence, to destroy hw-fence during release */
to_hw_dma_fence(fences[i])->client_handle = obj->client_handle;
data.hash[i] = hash;
}
/* create fd */
fd = get_unused_fd_flags(0);
if (fd < 0) {
if (fd <= 0) {
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
dma_fence_put(&fence_array->base);
kfree(fence_lock);
kfree(fence_array);
return fd;
}
@@ -451,7 +420,6 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l
if (sync_file == NULL) {
HWFNC_ERR("couldn't create fence fd, %d\n", fd);
dma_fence_put(&fence_array->base);
kfree(fence_lock);
kfree(fence_array);
ret = -EINVAL;
goto exit;
@@ -460,12 +428,10 @@ static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned l
/* Decrement the refcount that sync_file_create increments */
dma_fence_put(&fence_array->base);
data.fence = fd;
data.fence_array_fd = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
fput(sync_file->file);
dma_fence_put(&fence_array->base);
kfree(fence_lock);
kfree(fence_array);
ret = -EFAULT;
goto exit;
}
@@ -492,7 +458,7 @@ static long hw_sync_ioctl_destroy_fence_array(struct hw_sync_obj *obj, unsigned
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
fd = data.fence;
fd = data.fence_array_fd;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);

Просмотреть файл

@@ -17,24 +17,33 @@
struct hw_fence_driver_data *hw_fence_drv_data;
static bool hw_fence_driver_enable;
void *msm_hw_fence_register(enum hw_fence_client_id client_id,
void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
struct msm_hw_fence_mem_addr *mem_descriptor)
{
struct msm_hw_fence_client *hw_fence_client;
enum hw_fence_client_id client_id;
int ret;
HWFNC_DBG_H("++ client_id:%d\n", client_id);
HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext);
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return ERR_PTR(-EAGAIN);
}
if (!mem_descriptor || client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: %d client_id:%d\n",
!mem_descriptor, client_id);
if (!mem_descriptor || client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: %d client_id_ext:%d\n",
!mem_descriptor, client_id_ext);
return ERR_PTR(-EINVAL);
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n",
client_id, client_id_ext);
return ERR_PTR(-EINVAL);
}
/* Alloc client handle */
hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL);
if (!hw_fence_client)
@@ -54,6 +63,7 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id,
mutex_unlock(&hw_fence_drv_data->clients_register_lock);
hw_fence_client->client_id = client_id;
hw_fence_client->client_id_ext = client_id_ext;
hw_fence_client->ipc_client_vid =
hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id);
hw_fence_client->ipc_client_pid =
@@ -74,7 +84,17 @@ void *msm_hw_fence_register(enum hw_fence_client_id client_id,
}
hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
if (hw_fence_client->update_rxq &&
hw_fence_drv_data->hw_fence_client_queue_size[client_id].queues_num <
HW_FENCE_CLIENT_QUEUES) {
HWFNC_ERR("Cannot update rx queue for tx queue-only client:%d\n", client_id);
ret = -EINVAL;
goto error;
}
hw_fence_client->send_ipc = hw_fence_ipcc_needs_ipc_irq(hw_fence_drv_data, client_id);
hw_fence_client->skip_txq_wr_idx = hw_fence_utils_skips_txq_wr_idx(hw_fence_drv_data,
client_id);
/* Alloc Client HFI Headers and Queues */
ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
@@ -124,7 +144,7 @@ int msm_hw_fence_deregister(void *client_handle)
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) {
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
@@ -249,7 +269,7 @@ int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle)
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->client_id >= HW_FENCE_CLIENT_MAX) {
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
@@ -297,6 +317,12 @@ int msm_hw_fence_wait_update_v2(void *client_handle,
return -EINVAL;
}
if (hw_fence_client->client_id > hw_fence_drv_data->rxq_clients_num) {
HWFNC_ERR("Transmit-only client client_id:%d client_id_ext:%d register for wait\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Process all the list of fences */
@@ -372,10 +398,19 @@ int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
}
EXPORT_SYMBOL(msm_hw_fence_reset_client);
int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags)
int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags)
{
enum hw_fence_client_id client_id;
if (client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext);
return -EINVAL;
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id:%d\n", client_id);
HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext);
return -EINVAL;
}