ipa: Remove IPA framework

1. Move the kernel and UAPI header files from the kernel project to the dataipa project
2. Add a Python script that sanitizes and copies the UAPI headers
3. Add kernel and UAPI header library exports to the Android makefiles
4. Refactor the driver code to use the internal headers
5. Refactor the driver code to export symbols without the IPA framework
6. Merge the ipa_clients and rndis modules into the main IPA module

Change-Id: Ie633f291aefe559ff2c39b26a5a9765371399edc
Signed-off-by: Ilia Lin <quic_ilial@quicinc.com>
Author: Ilia Lin
Date: 2022-08-08 15:04:36 +03:00
Parent: ae5791c585
Commit: b37958da46
93 changed files with 13127 additions and 1256 deletions



@@ -0,0 +1,278 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_ETH_H_
#define _IPA_ETH_H_
#include "ipa.h"
#include <linux/msm_ipa.h>
#include <linux/msm_gsi.h>
/* New architecture prototypes */
typedef void (*ipa_eth_ready_cb)(void *user_data);
typedef u32 ipa_eth_hdl_t;
/**
* struct ipa_eth_ready - eth readiness parameters
*
* @notify: ipa_eth client ready callback notifier
* @userdata: userdata for ipa_eth ready cb
* @is_eth_ready: true if ipa_eth client is already ready
*/
struct ipa_eth_ready {
ipa_eth_ready_cb notify;
void *userdata;
/* out params */
bool is_eth_ready;
};
/**
* enum ipa_eth_client_type - names for the various IPA
* eth "clients".
*/
enum ipa_eth_client_type {
IPA_ETH_CLIENT_AQC107,
IPA_ETH_CLIENT_AQC113,
IPA_ETH_CLIENT_RTK8111K,
IPA_ETH_CLIENT_RTK8125B,
IPA_ETH_CLIENT_NTN,
IPA_ETH_CLIENT_EMAC,
IPA_ETH_CLIENT_MAX,
};
/**
* enum ipa_eth_pipe_traffic_type - traffic type for the various IPA
* eth "pipes".
*/
enum ipa_eth_pipe_traffic_type {
IPA_ETH_PIPE_BEST_EFFORT,
IPA_ETH_PIPE_LOW_LATENCY,
IPA_ETH_PIPE_TRAFFIC_TYPE_MAX,
};
/**
* enum ipa_eth_pipe_direction - pipe directions for the same
* ethernet client.
*/
enum ipa_eth_pipe_direction {
IPA_ETH_PIPE_DIR_TX,
IPA_ETH_PIPE_DIR_RX,
IPA_ETH_PIPE_DIR_MAX,
};
#define IPA_ETH_INST_ID_MAX (2)
/**
* struct ipa_eth_ntn_setup_info - parameters for ntn ethernet
* offloading
*
* @bar_addr: bar PA to access NTN register
* @tail_ptr_offs: tail ptr offset
* @ioc_mod_threshold: number of descriptors per interrupt request from the
* NTN3 HW, signaled via a descriptor bit as part of the protocol.
*/
struct ipa_eth_ntn_setup_info {
phys_addr_t bar_addr;
phys_addr_t tail_ptr_offs;
uint16_t ioc_mod_threshold;
};
/**
* struct ipa_eth_aqc_setup_info - parameters for aqc ethernet
* offloading
*
* @bar_addr: bar PA to access AQC register
* @head_ptr_offs: head ptr offset
* @aqc_ch: AQC ch number
* @dest_tail_ptr_offs: tail ptr offset
*/
struct ipa_eth_aqc_setup_info {
phys_addr_t bar_addr;
phys_addr_t head_ptr_offs;
u8 aqc_ch;
phys_addr_t dest_tail_ptr_offs;
};
/**
* struct ipa_eth_realtek_setup_info - parameters for realtek ethernet
* offloading
*
* @bar_addr: bar PA to access RTK register
* @bar_size: bar region size
* @queue_number: Which RTK queue to check the status on
* @dest_tail_ptr_offs: tail ptr offset
*/
struct ipa_eth_realtek_setup_info {
phys_addr_t bar_addr;
u32 bar_size;
u8 queue_number;
phys_addr_t dest_tail_ptr_offs;
};
/**
* struct ipa_eth_buff_smmu_map - IPA iova->pa SMMU mapping
* @iova: virtual address of the data buffer
* @pa: physical address of the data buffer
*/
struct ipa_eth_buff_smmu_map {
dma_addr_t iova;
phys_addr_t pa;
};
/**
* struct ipa_eth_pipe_setup_info - info needed for IPA setups
* @is_transfer_ring_valid: if transfer ring is needed
* @transfer_ring_base: the base of the transfer ring
* @transfer_ring_sgt: sgtable of transfer ring
* @transfer_ring_size: size of the transfer ring
* @is_buffer_pool_valid: if buffer pool is needed
* @buffer_pool_base_addr: base of buffer pool address
* @buffer_pool_base_sgt: sgtable of buffer pool
* @data_buff_list_size: number of buffers
* @data_buff_list: array of data buffer list
* @fix_buffer_size: buffer size
* @notify: callback for exception/embedded packets
* @priv: priv for exception callback
* @client_info: vendor specific pipe setup info
* @db_pa: doorbell physical address
* @db_val: doorbell value ethernet HW need to ring
*/
struct ipa_eth_pipe_setup_info {
/* transfer ring info */
bool is_transfer_ring_valid;
dma_addr_t transfer_ring_base;
struct sg_table *transfer_ring_sgt;
u32 transfer_ring_size;
/* buffer pool info */
bool is_buffer_pool_valid;
dma_addr_t buffer_pool_base_addr;
struct sg_table *buffer_pool_base_sgt;
/* buffer info */
u32 data_buff_list_size;
struct ipa_eth_buff_smmu_map *data_buff_list;
u32 fix_buffer_size;
/* client notify cb */
ipa_notify_cb notify;
void *priv;
/* vendor specific info */
union {
struct ipa_eth_aqc_setup_info aqc;
struct ipa_eth_realtek_setup_info rtk;
struct ipa_eth_ntn_setup_info ntn;
} client_info;
/* output params */
phys_addr_t db_pa;
u32 db_val;
};
/**
* struct ipa_eth_client_pipe_info - ETH pipe/gsi related configuration
* @link: link of ep for different client function on same ethernet HW
* @dir: TX or RX direction
* @info: tx/rx pipe setup info
* @client_info: client the pipe belongs to
* @pipe_hdl: output params, pipe handle
*/
struct ipa_eth_client_pipe_info {
struct list_head link;
enum ipa_eth_pipe_direction dir;
struct ipa_eth_pipe_setup_info info;
struct ipa_eth_client *client_info;
/* output params */
ipa_eth_hdl_t pipe_hdl;
};
/**
* struct ipa_eth_client - client info per traffic type
* provided by offload client
* @client_type: ethernet client type
* @inst_id: instance id for dual NIC support
* @traffic_type: traffic type
* @pipe_list: list of pipes with same traffic type
* @priv: private data for client
* @test: is test client
*/
struct ipa_eth_client {
/* vendor driver */
enum ipa_eth_client_type client_type;
u8 inst_id;
/* traffic type */
enum ipa_eth_pipe_traffic_type traffic_type;
struct list_head pipe_list;
/* client specific priv data*/
void *priv;
bool test;
};
/**
* struct ipa_eth_perf_profile - To set BandWidth profile
*
* @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
*/
struct ipa_eth_perf_profile {
u32 max_supported_bw_mbps;
};
/**
* struct ipa_eth_hdr_info - Header to install on IPA HW
*
* @hdr: header to install on IPA HW
* @hdr_len: length of header
* @dst_mac_addr_offset: destination mac address offset
* @hdr_type: layer two header type
*/
struct ipa_eth_hdr_info {
u8 *hdr;
u8 hdr_len;
u8 dst_mac_addr_offset;
enum ipa_hdr_l2_type hdr_type;
};
/**
* struct ipa_eth_intf_info - parameters for ipa offload
* interface registration
*
* @netdev_name: network interface name
* @hdr: hdr for ipv4/ipv6
* @pipe_hdl_list_size: number of pipes prop needed for this interface
* @pipe_hdl_list: array of pipes used for this interface
*/
struct ipa_eth_intf_info {
const char *netdev_name;
struct ipa_eth_hdr_info hdr[IPA_IP_MAX];
/* tx/rx pipes for same netdev */
int pipe_hdl_list_size;
ipa_eth_hdl_t *pipe_hdl_list;
};
int ipa_eth_register_ready_cb(struct ipa_eth_ready *ready_info);
int ipa_eth_unregister_ready_cb(struct ipa_eth_ready *ready_info);
int ipa_eth_client_conn_pipes(struct ipa_eth_client *client);
int ipa_eth_client_disconn_pipes(struct ipa_eth_client *client);
int ipa_eth_client_reg_intf(struct ipa_eth_intf_info *intf);
int ipa_eth_client_unreg_intf(struct ipa_eth_intf_info *intf);
int ipa_eth_client_set_perf_profile(struct ipa_eth_client *client,
struct ipa_eth_perf_profile *profile);
int ipa_eth_client_conn_evt(struct ipa_ecm_msg *msg);
int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type(
enum ipa_eth_client_type eth_client_type, enum ipa_eth_pipe_direction dir);
bool ipa_eth_client_exist(
enum ipa_eth_client_type eth_client_type, int inst_id);
#endif /* _IPA_ETH_H_ */
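/*
 * Illustrative usage sketch (not part of the header above): how an Ethernet
 * offload client might wait for IPA readiness and then connect its pipes.
 * my_eth_ready_cb() and the static variables are hypothetical, the ring and
 * buffer-pool fields of each pipe are left to be filled from the NIC
 * descriptors, the list helpers come from <linux/list.h>, and error
 * handling is abbreviated.
 */
static struct ipa_eth_client eth_client;
static struct ipa_eth_client_pipe_info rx_pipe, tx_pipe;

static void my_eth_ready_cb(void *user_data)
{
    /* IPA is ready; connect the pipes from process context */
}

static int my_eth_offload_start(void)
{
    struct ipa_eth_ready ready = {
        .notify = my_eth_ready_cb,
        .userdata = NULL,
    };
    int ret;

    ret = ipa_eth_register_ready_cb(&ready);
    if (ret)
        return ret;
    if (!ready.is_eth_ready)
        return 0; /* wait for my_eth_ready_cb() */

    eth_client.client_type = IPA_ETH_CLIENT_AQC107;
    eth_client.inst_id = 0;
    eth_client.traffic_type = IPA_ETH_PIPE_BEST_EFFORT;
    INIT_LIST_HEAD(&eth_client.pipe_list);

    rx_pipe.dir = IPA_ETH_PIPE_DIR_RX;
    rx_pipe.client_info = &eth_client;
    tx_pipe.dir = IPA_ETH_PIPE_DIR_TX;
    tx_pipe.client_info = &eth_client;
    /* rx_pipe.info / tx_pipe.info (rings, buffers) must be filled here */
    list_add_tail(&rx_pipe.link, &eth_client.pipe_list);
    list_add_tail(&tx_pipe.link, &eth_client.pipe_list);

    return ipa_eth_client_conn_pipes(&eth_client);
}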


@@ -0,0 +1,170 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#ifndef IPA_MHI_H_
#define IPA_MHI_H_
#include <linux/ipa.h>
#include <linux/types.h>
/**
* enum ipa_mhi_event_type - event type for mhi callback
*
* @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
* this event MHI client is expected to call to ipa_mhi_start() API
* @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
*/
enum ipa_mhi_event_type {
IPA_MHI_EVENT_READY,
IPA_MHI_EVENT_DATA_AVAILABLE,
IPA_MHI_EVENT_MAX,
};
enum ipa_mhi_mstate {
IPA_MHI_STATE_M0,
IPA_MHI_STATE_M1,
IPA_MHI_STATE_M2,
IPA_MHI_STATE_M3,
IPA_MHI_STATE_M_MAX
};
typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
unsigned long data);
/**
* struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
* @addr_low: MSI lower base physical address
* @addr_hi: MSI higher base physical address
* @data: Data Pattern to use when generating the MSI
* @mask: Mask indicating number of messages assigned by the host to device
*
* msi value is written according to this formula:
* ((data & ~mask) | (mmio.msiVec & mask))
*/
struct ipa_mhi_msi_info {
u32 addr_low;
u32 addr_hi;
u32 data;
u32 mask;
};
/**
* struct ipa_mhi_init_params - parameters for IPA MHI initialization API
*
* @msi: MSI (Message Signaled Interrupts) parameters
* @mmio_addr: MHI MMIO physical address
* @first_ch_idx: First channel ID for hardware accelerated channels.
* @first_er_idx: First event ring ID for hardware accelerated channels.
* @assert_bit40: should assert bit 40 in order to access host space.
* If PCIe iATU is configured, there is no need to assert bit 40
* @notify: client callback
* @priv: client private data to be provided in client callback
* @test_mode: flag to indicate if IPA MHI is in unit test mode
*/
struct ipa_mhi_init_params {
struct ipa_mhi_msi_info msi;
u32 mmio_addr;
u32 first_ch_idx;
u32 first_er_idx;
bool assert_bit40;
mhi_client_cb notify;
void *priv;
bool test_mode;
};
/**
* struct ipa_mhi_start_params - parameters for IPA MHI start API
*
* @host_ctrl_addr: Base address of MHI control data structures
* @host_data_addr: Base address of MHI data buffers
* @channel_context_array_addr: channel context array address in host address space
* @event_context_array_addr: event context array address in host address space
*/
struct ipa_mhi_start_params {
u32 host_ctrl_addr;
u32 host_data_addr;
u64 channel_context_array_addr;
u64 event_context_array_addr;
};
/**
* struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
*
* @sys: IPA EP configuration info
* @channel_id: MHI channel id
*/
struct ipa_mhi_connect_params {
struct ipa_sys_connect_params sys;
u8 channel_id;
};
/* bit #40 in address should be asserted for MHI transfers over pcie */
#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
#if IS_ENABLED(CONFIG_IPA3)
int ipa_mhi_init(struct ipa_mhi_init_params *params);
int ipa_mhi_start(struct ipa_mhi_start_params *params);
int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
int ipa_mhi_suspend(bool force);
int ipa_mhi_resume(void);
void ipa_mhi_destroy(void);
int ipa_mhi_update_mstate(enum ipa_mhi_mstate mstate_info);
#else /* IS_ENABLED(CONFIG_IPA3) */
static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
{
return -EPERM;
}
static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
{
return -EPERM;
}
static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
u32 *clnt_hdl)
{
return -EPERM;
}
static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
{
return -EPERM;
}
static inline int ipa_mhi_suspend(bool force)
{
return -EPERM;
}
static inline int ipa_mhi_resume(void)
{
return -EPERM;
}
static inline void ipa_mhi_destroy(void)
{
}
static inline int ipa_mhi_update_mstate
(enum ipa_mhi_mstate mstate_info)
{
return -EPERM;
}
#endif /* IS_ENABLED(CONFIG_IPA3) */
#endif /* IPA_MHI_H_ */
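/*
 * Illustrative usage sketch (not part of the header above): typical MHI
 * client bring-up order.  my_mhi_cb() and all address values are
 * hypothetical placeholders; a real client fills them from the MHI MMIO
 * space and host memory layout, and defers ipa_mhi_start() to process
 * context if the callback may run in atomic context.
 */
static void my_mhi_cb(void *priv, enum ipa_mhi_event_type event,
    unsigned long data)
{
    if (event == IPA_MHI_EVENT_READY) {
        struct ipa_mhi_start_params start = {
            .host_ctrl_addr = 0x0,
            .host_data_addr = 0x0,
            .channel_context_array_addr = 0x0,
            .event_context_array_addr = 0x0,
        };

        ipa_mhi_start(&start);
    }
}

static int my_mhi_probe(void)
{
    struct ipa_mhi_init_params init = {
        .msi = { .addr_low = 0x0, .addr_hi = 0x0, .data = 0x0, .mask = 0x0 },
        .mmio_addr = 0x0,
        .first_ch_idx = 100,
        .first_er_idx = 100,
        .assert_bit40 = true,
        .notify = my_mhi_cb,
        .priv = NULL,
        .test_mode = false,
    };

    return ipa_mhi_init(&init);
}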


@@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_ODO_BRIDGE_H_
#define _IPA_ODO_BRIDGE_H_
#include "ipa.h"
/**
* struct odu_bridge_params - parameters for odu bridge initialization API
*
* @netdev_name: network interface name
* @priv: private data that will be supplied to client's callback
* @tx_dp_notify: callback for handling SKBs. The following events are supported:
* IPA_WRITE_DONE: called after the client calls odu_bridge_tx_dp().
* The client is expected to free the skb.
* IPA_RECEIVE: called to deliver an skb to APPS.
* The client is expected to deliver the skb to the network stack.
* @send_dl_skb: callback for sending skb on downlink direction to adapter.
* Client is expected to free the skb.
* @device_ethaddr: device Ethernet address in network order.
* @ipa_desc_size: IPA Sys Pipe Desc Size
*/
struct odu_bridge_params {
const char *netdev_name;
void *priv;
ipa_notify_cb tx_dp_notify;
int (*send_dl_skb)(void *priv, struct sk_buff *skb);
u8 device_ethaddr[ETH_ALEN];
u32 ipa_desc_size;
};
/**
* struct ipa_bridge_init_params - parameters for IPA bridge initialization API
*
* @info: structure contains initialization information
* @wakeup_request: callback to client to indicate there is downlink data
* available. Client is expected to call ipa_bridge_resume() to start
* receiving data
*/
struct ipa_bridge_init_params {
struct odu_bridge_params info;
void (*wakeup_request)(void *cl_priv);
};
#if IS_ENABLED(CONFIG_IPA3)
int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl);
int ipa_bridge_connect(u32 hdl);
int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth);
int ipa_bridge_disconnect(u32 hdl);
int ipa_bridge_suspend(u32 hdl);
int ipa_bridge_resume(u32 hdl);
int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
struct ipa_tx_meta *metadata);
int ipa_bridge_cleanup(u32 hdl);
#else /* IS_ENABLED(CONFIG_IPA3) */
static inline int ipa_bridge_init(struct ipa_bridge_init_params *params,
u32 *hdl)
{
return -EPERM;
}
static inline int ipa_bridge_connect(u32 hdl)
{
return -EPERM;
}
static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
{
return -EPERM;
}
static inline int ipa_bridge_disconnect(u32 hdl)
{
return -EPERM;
}
static inline int ipa_bridge_suspend(u32 hdl)
{
return -EPERM;
}
static inline int ipa_bridge_resume(u32 hdl)
{
return -EPERM;
}
static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
struct ipa_tx_meta *metadata)
{
return -EPERM;
}
static inline int ipa_bridge_cleanup(u32 hdl)
{
return -EPERM;
}
#endif /* IS_ENABLED(CONFIG_IPA3) */
/* Below API is deprecated. Please use the API above */
static inline int odu_bridge_init(struct odu_bridge_params *params)
{
return -EPERM;
}
static inline int odu_bridge_disconnect(void)
{
return -EPERM;
}
static inline int odu_bridge_connect(void)
{
return -EPERM;
}
static inline int odu_bridge_tx_dp(struct sk_buff *skb,
struct ipa_tx_meta *metadata)
{
return -EPERM;
}
static inline int odu_bridge_cleanup(void)
{
return -EPERM;
}
#endif /* _IPA_ODO_BRIDGE_H_ */
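/*
 * Illustrative usage sketch (not part of the header above): bringing up the
 * IPA bridge for an ODU-style adapter.  my_tx_dp_notify() (an ipa_notify_cb
 * implementation), my_send_dl_skb() and my_wakeup() are hypothetical client
 * callbacks assumed to exist elsewhere in the driver; the descriptor size is
 * a placeholder and error handling is abbreviated.
 */
static int my_bridge_probe(struct net_device *ndev)
{
    struct ipa_bridge_init_params params = {
        .info = {
            .netdev_name = ndev->name,
            .priv = ndev,
            .tx_dp_notify = my_tx_dp_notify,
            .send_dl_skb = my_send_dl_skb,
            .ipa_desc_size = 2048, /* placeholder */
        },
        .wakeup_request = my_wakeup,
    };
    u32 hdl;
    int ret;

    ether_addr_copy(params.info.device_ethaddr, ndev->dev_addr);

    ret = ipa_bridge_init(&params, &hdl);
    if (ret)
        return ret;

    return ipa_bridge_connect(hdl);
}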


@@ -0,0 +1,101 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_QDSS_H_
#define _IPA_QDSS_H_
#include "ipa.h"
/**
* enum ipa_qdss_notify - the only possible return values
* @IPA_QDSS_SUCCESS: returned as-is for both connect
* and disconnect
* @IPA_QDSS_PIPE_CONN_FAILURE: will be returned as negative value
* @IPA_QDSS_PIPE_DISCONN_FAILURE: will be returned as negative value
*/
enum ipa_qdss_notify {
IPA_QDSS_SUCCESS,
IPA_QDSS_PIPE_CONN_FAILURE,
IPA_QDSS_PIPE_DISCONN_FAILURE,
};
/**
* struct ipa_qdss_conn_in_params - QDSS -> IPA TX configuration
* @data_fifo_base_addr: Base address of the data FIFO used by BAM
* @data_fifo_size: Size of the data FIFO
* @desc_fifo_base_addr: Base address of the descriptor FIFO used by BAM
* @desc_fifo_size: Should be configured to 1 by QDSS
* @bam_p_evt_dest_addr: equivalent to event_ring_doorbell_pa -
* physical address of the doorbell through which the IPA uC
* updates the head pointer of the event ring.
* QDSS should pass the BAM_P_EVNT_REG address in this field.
* It is configured with the GSI doorbell address; GSI sends
* Update RP by writing to this address
* @bam_p_evt_threshold: threshold level, in bytes consumed
* @override_eot: if override EOT==1, it doesn't check the EOT bit in
* the descriptor
*/
struct ipa_qdss_conn_in_params {
phys_addr_t data_fifo_base_addr;
u32 data_fifo_size;
phys_addr_t desc_fifo_base_addr;
u32 desc_fifo_size;
phys_addr_t bam_p_evt_dest_addr;
u32 bam_p_evt_threshold;
u32 override_eot;
};
/**
* struct ipa_qdss_conn_out_params - information provided
* to QDSS driver
* @ipa_rx_db_pa: physical address of IPA doorbell for RX (QDSS->IPA transactions).
* QDSS should take this address and assign it to BAM_P_EVENT_DEST_ADDR
*/
struct ipa_qdss_conn_out_params {
phys_addr_t ipa_rx_db_pa;
};
#if IS_ENABLED(CONFIG_IPA3)
/**
* ipa_qdss_conn_pipes - Client should call this
* function to connect QDSS -> IPA pipe
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_qdss_conn_pipes(struct ipa_qdss_conn_in_params *in,
struct ipa_qdss_conn_out_params *out);
/**
* ipa_qdss_disconn_pipes() - Client should call this
* function to disconnect pipes
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_qdss_disconn_pipes(void);
#else /* CONFIG_IPA3 */
static inline int ipa_qdss_conn_pipes(struct ipa_qdss_conn_in_params *in,
struct ipa_qdss_conn_out_params *out)
{
return -IPA_QDSS_PIPE_CONN_FAILURE;
}
static inline int ipa_qdss_disconn_pipes(void)
{
return -IPA_QDSS_PIPE_DISCONN_FAILURE;
}
#endif /* CONFIG_IPA3 */
#endif /* _IPA_QDSS_H_ */
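/*
 * Illustrative usage sketch (not part of the header above): connecting the
 * QDSS -> IPA pipe.  All addresses, sizes and threshold values below are
 * hypothetical placeholders, not taken from any real target.
 */
static int my_qdss_connect(void)
{
    struct ipa_qdss_conn_in_params in = {
        .data_fifo_base_addr = 0x10000000,
        .data_fifo_size = 0x10000,
        .desc_fifo_base_addr = 0x10010000,
        .desc_fifo_size = 1, /* per the field description above */
        .bam_p_evt_dest_addr = 0x10020000, /* BAM_P_EVNT_REG */
        .bam_p_evt_threshold = 0x30,
        .override_eot = 1,
    };
    struct ipa_qdss_conn_out_params out;
    int ret;

    ret = ipa_qdss_conn_pipes(&in, &out);
    if (ret)
        return ret;

    /* out.ipa_rx_db_pa is then programmed into BAM_P_EVENT_DEST_ADDR */
    return 0;
}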


@@ -0,0 +1,326 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_UC_OFFLOAD_H_
#define _IPA_UC_OFFLOAD_H_
#include "ipa.h"
/**
* enum ipa_uc_offload_proto
* Protocol type: either WDI or Neutrino
*
* @IPA_UC_WDI: wdi Protocol
* @IPA_UC_NTN: Neutrino Protocol
*/
enum ipa_uc_offload_proto {
IPA_UC_INVALID = 0,
IPA_UC_WDI = 1,
IPA_UC_NTN = 2,
IPA_UC_NTN_V2X = 3,
IPA_UC_MAX_PROT_SIZE
};
/**
* struct ipa_hdr_info - Header to install on IPA HW
*
* @hdr: header to install on IPA HW
* @hdr_len: length of header
* @dst_mac_addr_offset: destination mac address offset
* @hdr_type: layer two header type
*/
struct ipa_hdr_info {
u8 *hdr;
u8 hdr_len;
u8 dst_mac_addr_offset;
enum ipa_hdr_l2_type hdr_type;
};
/**
* struct ipa_uc_offload_intf_params - parameters for uC offload
* interface registration
*
* @netdev_name: network interface name
* @notify: callback for exception/embedded packets
* @priv: callback cookie
* @hdr_info: header information
* @meta_data: metadata if any
* @meta_data_mask: metadata mask
* @proto: uC offload protocol type
* @alt_dst_pipe: alternate routing output pipe
*/
struct ipa_uc_offload_intf_params {
const char *netdev_name;
ipa_notify_cb notify;
void *priv;
struct ipa_hdr_info hdr_info[IPA_IP_MAX];
u8 is_meta_data_valid;
u32 meta_data;
u32 meta_data_mask;
enum ipa_uc_offload_proto proto;
enum ipa_client_type alt_dst_pipe;
};
/**
* struct ntn_buff_smmu_map - IPA iova->pa SMMU mapping
* @iova: virtual address of the data buffer
* @pa: physical address of the data buffer
*/
struct ntn_buff_smmu_map {
dma_addr_t iova;
phys_addr_t pa;
};
/**
* struct ipa_ntn_setup_info - NTN TX/Rx configuration
* @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
* @smmu_enabled: SMMU is enabled for uC or not
* @ring_base_pa: physical address of the base of the Tx/Rx ring
* @ring_base_iova: virtual address of the base of the Tx/Rx ring
* @ring_base_sgt: scatter table for the NTN rings; contains a valid non-NULL
* value when EMAC S1-SMMU is enabled, else NULL.
* @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
* @buff_pool_base_pa: physical address of the base of the Tx/Rx buffer pool
* @buff_pool_base_iova: virtual address of the base of the Tx/Rx buffer pool
* @buff_pool_base_sgt: scatter table for the buffer pools; contains a valid
* non-NULL value when SMMU is enabled. When NULL, a flat
* pa-to-iova mapping is assumed (SMMU disabled, pa == iova).
* @num_buffers: Rx/Tx buffer pool size (in terms of elements)
* @data_buff_size: size of the each data buffer allocated in DDR
* @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
* tail pointer
* @db_mode: 0 means irq mode, 1 means db mode
*/
struct ipa_ntn_setup_info {
enum ipa_client_type client;
bool smmu_enabled;
phys_addr_t ring_base_pa;
dma_addr_t ring_base_iova;
struct sg_table *ring_base_sgt;
u32 ntn_ring_size;
phys_addr_t buff_pool_base_pa;
dma_addr_t buff_pool_base_iova;
struct sg_table *buff_pool_base_sgt;
struct ntn_buff_smmu_map *data_buff_list;
u32 num_buffers;
u32 data_buff_size;
phys_addr_t ntn_reg_base_ptr_pa;
u8 db_mode;
};
/**
* struct ipa_uc_offload_out_params - out parameters for uC offload
*
* @clnt_hndl: Handle that client need to pass during
* further operations
*/
struct ipa_uc_offload_out_params {
u32 clnt_hndl;
};
/**
* struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
* @ul: parameters to connect UL pipe(from Neutrino to IPA)
* @dl: parameters to connect DL pipe(from IPA to Neutrino)
*/
struct ipa_ntn_conn_in_params {
struct ipa_ntn_setup_info ul;
struct ipa_ntn_setup_info dl;
};
/**
* struct ipa_ntn_conn_out_params - information provided
* to uC offload client
* @ul_uc_db_pa: physical address of IPA uc doorbell for UL
* @dl_uc_db_pa: physical address of IPA uc doorbell for DL
* @ul_uc_db_iomem: iomem address of IPA uc doorbell for UL
* @dl_uc_db_iomem: iomem address of IPA uc doorbell for DL
*/
struct ipa_ntn_conn_out_params {
phys_addr_t ul_uc_db_pa;
phys_addr_t dl_uc_db_pa;
void __iomem *ul_uc_db_iomem;
void __iomem *dl_uc_db_iomem;
};
/**
* struct ipa_uc_offload_conn_in_params - information provided by
* uC offload client
* @clnt_hndl: Handle that return as part of reg interface
* @proto: Protocol to use for offload data path
* @ntn: uC RX/Tx configuration info
*/
struct ipa_uc_offload_conn_in_params {
u32 clnt_hndl;
union {
struct ipa_ntn_conn_in_params ntn;
} u;
};
/**
* struct ipa_uc_offload_conn_out_params - information provided
* to uC offload client
* @u.ntn: NTN connect output parameters (see struct ipa_ntn_conn_out_params)
*/
struct ipa_uc_offload_conn_out_params {
union {
struct ipa_ntn_conn_out_params ntn;
} u;
};
/**
* struct ipa_perf_profile - To set BandWidth profile
*
* @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
* @proto: uC offload protocol type
* @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
*/
struct ipa_perf_profile {
enum ipa_client_type client;
enum ipa_uc_offload_proto proto;
u32 max_supported_bw_mbps;
};
/**
* struct ipa_uc_ready_params - uC ready CB parameters
* @is_uC_ready: uC loaded or not
* @priv : callback cookie
* @notify: callback
* @proto: uC offload protocol type
*/
struct ipa_uc_ready_params {
bool is_uC_ready;
void *priv;
ipa_uc_ready_cb notify;
enum ipa_uc_offload_proto proto;
};
#if IS_ENABLED(CONFIG_IPA3)
/**
* ipa_uc_offload_reg_intf - Client should call this function to
* init uC offload data path
*
* @in: [in] interface registration parameters from client
* @out: [out] output parameters to client
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
int ipa_uc_offload_reg_intf(
struct ipa_uc_offload_intf_params *in,
struct ipa_uc_offload_out_params *out);
/**
* ipa_uc_offload_cleanup - Client Driver should call this
* function before unload and after disconnect
*
* @Return 0 on success, negative on failure
*/
int ipa_uc_offload_cleanup(u32 clnt_hdl);
/**
* ipa_uc_offload_conn_pipes - Client should call this
* function to connect uC pipe for offload data path
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
struct ipa_uc_offload_conn_out_params *out);
/**
* ipa_uc_offload_disconn_pipes() - Client should call this
* function to disconnect uC pipe to disable offload data path
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
/**
* ipa_set_perf_profile() - Client should call this function to
* set IPA clock Band Width based on data rates
* @profile: [in] BandWidth profile to use
*
* Returns: 0 on success, negative on failure
*/
int ipa_set_perf_profile(struct ipa_perf_profile *profile);
/*
* Check uC readiness and register a uC-ready callback;
* the callback is registered only if the uC is not ready yet
*/
int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param);
/*
* To de-register uC ready callback
*/
void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto);
#else /* IS_ENABLED(CONFIG_IPA3) */
static inline int ipa_uc_offload_reg_intf(
struct ipa_uc_offload_intf_params *in,
struct ipa_uc_offload_out_params *out)
{
return -EPERM;
}
static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
{
return -EPERM;
}
static inline int ipa_uc_offload_conn_pipes(
struct ipa_uc_offload_conn_in_params *in,
struct ipa_uc_offload_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
{
return -EPERM;
}
static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
{
return -EPERM;
}
static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param)
{
return -EPERM;
}
static inline void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
{
}
#endif /* IS_ENABLED(CONFIG_IPA3) */
#endif /* _IPA_UC_OFFLOAD_H_ */
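/*
 * Illustrative usage sketch (not part of the header above): registering an
 * NTN offload interface and connecting its pipes.  my_notify_cb is a
 * hypothetical ipa_notify_cb implementation, the L2 header contents and the
 * NTN ring/buffer details are omitted, and IPA_IP_v4/IPA_IP_v6 and
 * IPA_HDR_L2_ETHERNET_II are assumed from msm_ipa.h.
 */
static int my_ntn_offload_setup(struct net_device *ndev)
{
    struct ipa_uc_offload_intf_params reg_in = {
        .netdev_name = ndev->name,
        .notify = my_notify_cb,
        .priv = ndev,
        .proto = IPA_UC_NTN,
    };
    struct ipa_uc_offload_out_params reg_out;
    struct ipa_uc_offload_conn_in_params conn_in = { 0 };
    struct ipa_uc_offload_conn_out_params conn_out;
    int ret;

    reg_in.hdr_info[IPA_IP_v4].hdr_type = IPA_HDR_L2_ETHERNET_II;
    reg_in.hdr_info[IPA_IP_v6].hdr_type = IPA_HDR_L2_ETHERNET_II;
    /* hdr, hdr_len and dst_mac_addr_offset must also be filled here */

    ret = ipa_uc_offload_reg_intf(&reg_in, &reg_out);
    if (ret)
        return ret;

    conn_in.clnt_hndl = reg_out.clnt_hndl;
    /* fill conn_in.u.ntn.ul and conn_in.u.ntn.dl (client type, rings,
     * buffer pool) from the Ethernet HW before connecting
     */
    ret = ipa_uc_offload_conn_pipes(&conn_in, &conn_out);
    if (ret)
        ipa_uc_offload_cleanup(reg_out.clnt_hndl);

    return ret;
}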


@@ -0,0 +1,737 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018 - 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_WDI3_H_
#define _IPA_WDI3_H_
#include <linux/ipa.h>
#define IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE 32
#define IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE 8
#define IPA_HW_WDI3_MAX_ER_DESC_SIZE \
(((IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) > \
(IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE)) ? \
(IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) : \
(IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE))
#define IPA_WDI_MAX_SUPPORTED_SYS_PIPE 3
typedef u32 ipa_wdi_hdl_t;
enum ipa_wdi_version {
IPA_WDI_1,
IPA_WDI_2,
IPA_WDI_3,
IPA_WDI_3_V2,
IPA_WDI_VER_MAX
};
#define IPA_WDI3_TX_DIR 1
#define IPA_WDI3_TX1_DIR 2
#define IPA_WDI3_RX_DIR 3
#define IPA_WDI_INST_MAX (2)
/**
* struct ipa_wdi_init_in_params - wdi init input parameters
*
* @wdi_version: wdi version
* @notify: uc ready callback
* @priv: uc ready callback cookie
*/
struct ipa_wdi_init_in_params {
enum ipa_wdi_version wdi_version;
ipa_uc_ready_cb notify;
void *priv;
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
ipa_wdi_meter_notifier_cb wdi_notify;
#endif
int inst_id;
};
/**
* struct ipa_wdi_init_out_params - wdi init output parameters
*
* @is_uC_ready: is uC ready. No API should be called until uC
is ready.
* @is_smmu_enabled: is smmu enabled
* @is_over_gsi: is wdi over GSI or uC
* @hdl: WDI instance handle to be used in subsequent per-instance calls
*/
struct ipa_wdi_init_out_params {
bool is_uC_ready;
bool is_smmu_enabled;
bool is_over_gsi;
ipa_wdi_hdl_t hdl;
};
/**
* struct ipa_wdi_hdr_info - Header to install on IPA HW
*
* @hdr: header to install on IPA HW
* @hdr_len: length of header
* @dst_mac_addr_offset: destination mac address offset
* @hdr_type: layer two header type
*/
struct ipa_wdi_hdr_info {
u8 *hdr;
u8 hdr_len;
u8 dst_mac_addr_offset;
enum ipa_hdr_l2_type hdr_type;
};
/**
* struct ipa_wdi_reg_intf_in_params - parameters for uC offload
* interface registration
*
* @netdev_name: network interface name
* @hdr_info: header information
* @is_meta_data_valid: if metadata is valid
* @meta_data: metadata if any
* @meta_data_mask: metadata mask
* @is_tx1_used: to indicate whether 2.4g or 5g iface
* @alt_dst_pipe: alternate routing output pipe
* @hdl: WDI instance handle
*/
struct ipa_wdi_reg_intf_in_params {
const char *netdev_name;
struct ipa_wdi_hdr_info hdr_info[IPA_IP_MAX];
enum ipa_client_type alt_dst_pipe;
u8 is_meta_data_valid;
u32 meta_data;
u32 meta_data_mask;
u8 is_tx1_used;
ipa_wdi_hdl_t hdl;
};
/**
* struct ipa_wdi_pipe_setup_info - WDI TX/Rx configuration
* @ipa_ep_cfg: ipa endpoint configuration
* @client: type of "client"
* @transfer_ring_base_pa: physical address of the base of the transfer ring
* @transfer_ring_size: size of the transfer ring
* @transfer_ring_doorbell_pa: physical address of the doorbell that
IPA uC will update the tailpointer of the transfer ring
* @is_txr_rn_db_pcie_addr: Boolean indicating whether the txr ring DB is a PCIe address
* @event_ring_base_pa: physical address of the base of the event ring
* @event_ring_size: event ring size
* @event_ring_doorbell_pa: physical address of the doorbell that IPA uC
will update the headpointer of the event ring
* @is_evt_rn_db_pcie_addr: Boolean indicating whether the evt ring DB is a PCIe address
* @num_pkt_buffers: Number of pkt buffers allocated. The size of the event
ring and the transfer ring has to be at least ( num_pkt_buffers + 1)
* @pkt_offset: packet offset (wdi header length)
* @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]: Holds a cached
template of the desc format
* @rx_bank_id: value used to perform TCL HW setting
*/
struct ipa_wdi_pipe_setup_info {
struct ipa_ep_cfg ipa_ep_cfg;
enum ipa_client_type client;
phys_addr_t transfer_ring_base_pa;
u32 transfer_ring_size;
phys_addr_t transfer_ring_doorbell_pa;
bool is_txr_rn_db_pcie_addr;
phys_addr_t event_ring_base_pa;
u32 event_ring_size;
phys_addr_t event_ring_doorbell_pa;
bool is_evt_rn_db_pcie_addr;
u16 num_pkt_buffers;
u16 pkt_offset;
u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE];
u8 rx_bank_id;
};
/**
* struct ipa_wdi_pipe_setup_info_smmu - WDI TX/Rx configuration
* @ipa_ep_cfg: ipa endpoint configuration
* @client: type of "client"
* @transfer_ring_base_pa: physical address of the base of the transfer ring
* @transfer_ring_size: size of the transfer ring
* @transfer_ring_doorbell_pa: physical address of the doorbell that
IPA uC will update the tailpointer of the transfer ring
* @is_txr_rn_db_pcie_addr: Boolean indicating whether the txr ring DB is a PCIe address
* @event_ring_base_pa: physical address of the base of the event ring
* @event_ring_size: event ring size
* @event_ring_doorbell_pa: physical address of the doorbell that IPA uC
will update the headpointer of the event ring
* @is_evt_rn_db_pcie_addr: Boolean indicating whether the evt ring DB is a PCIe address
* @num_pkt_buffers: Number of pkt buffers allocated. The size of the event
ring and the transfer ring has to be at least ( num_pkt_buffers + 1)
* @pkt_offset: packet offset (wdi header length)
* @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]: Holds a cached
template of the desc format
* @rx_bank_id: value used to perform TCL HW setting
*/
struct ipa_wdi_pipe_setup_info_smmu {
struct ipa_ep_cfg ipa_ep_cfg;
enum ipa_client_type client;
struct sg_table transfer_ring_base;
u32 transfer_ring_size;
phys_addr_t transfer_ring_doorbell_pa;
bool is_txr_rn_db_pcie_addr;
struct sg_table event_ring_base;
u32 event_ring_size;
phys_addr_t event_ring_doorbell_pa;
bool is_evt_rn_db_pcie_addr;
u16 num_pkt_buffers;
u16 pkt_offset;
u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE];
u8 rx_bank_id;
};
/**
* struct ipa_wdi_conn_in_params - information provided by
* uC offload client
* @notify: client callback function
* @priv: client cookie
* @is_smmu_enabled: if smmu is enabled
* @num_sys_pipe_needed: number of sys pipe needed
* @sys_in: parameters to setup sys pipe in mcc mode
* @tx: parameters to connect TX pipe(from IPA to WLAN)
* @tx_smmu: smmu parameters to connect TX pipe(from IPA to WLAN)
* @rx: parameters to connect RX pipe(from WLAN to IPA)
* @rx_smmu: smmu parameters to connect RX pipe(from WLAN to IPA)
* @is_tx1_used: to notify extra pipe required/not
* @tx1: parameters to connect TX1 pipe(from IPA to WLAN second pipe)
* @tx1_smmu: smmu parameters to connect TX1 pipe(from IPA to WLAN second pipe)
*/
struct ipa_wdi_conn_in_params {
ipa_notify_cb notify;
void *priv;
bool is_smmu_enabled;
u8 num_sys_pipe_needed;
struct ipa_sys_connect_params sys_in[IPA_WDI_MAX_SUPPORTED_SYS_PIPE];
union {
struct ipa_wdi_pipe_setup_info tx;
struct ipa_wdi_pipe_setup_info_smmu tx_smmu;
} u_tx;
union {
struct ipa_wdi_pipe_setup_info rx;
struct ipa_wdi_pipe_setup_info_smmu rx_smmu;
} u_rx;
bool is_tx1_used;
union {
struct ipa_wdi_pipe_setup_info tx;
struct ipa_wdi_pipe_setup_info_smmu tx_smmu;
} u_tx1;
ipa_wdi_hdl_t hdl;
};
/**
* struct ipa_wdi_conn_out_params - information provided
* to WLAN driver
* @tx_uc_db_pa: physical address of IPA uC doorbell for TX
* @rx_uc_db_pa: physical address of IPA uC doorbell for RX
* @tx1_uc_db_pa: physical address of IPA uC doorbell for TX1
* @is_ddr_mapped: flag set to true if address is from DDR
*/
struct ipa_wdi_conn_out_params {
phys_addr_t tx_uc_db_pa;
phys_addr_t rx_uc_db_pa;
phys_addr_t tx1_uc_db_pa;
bool is_ddr_mapped;
};
/**
* struct ipa_wdi_perf_profile - To set BandWidth profile
*
* @client: type of client
* @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
*/
struct ipa_wdi_perf_profile {
enum ipa_client_type client;
u32 max_supported_bw_mbps;
};
/**
* struct ipa_wdi_capabilities_out_params - wdi capability parameters
*
* @num_of_instances: Number of WLAN instances supported.
*/
struct ipa_wdi_capabilities_out_params {
u8 num_of_instances;
};
#if IS_ENABLED(CONFIG_IPA3)
/**
* ipa_wdi_get_capabilities - Client should call this function to
* know the WDI capabilities
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_get_capabilities(
struct ipa_wdi_capabilities_out_params *out);
/**
* ipa_wdi_init - Client should call this function to
* init WDI IPA offload data path
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
struct ipa_wdi_init_out_params *out);
/** ipa_get_wdi_version - return wdi version
*
* @Return wdi version, negative on failure
*/
int ipa_get_wdi_version(void);
/** ipa_wdi_is_tx1_used - return if DBS mode is active
*
* @Return bool
*/
bool ipa_wdi_is_tx1_used(void);
/**
* ipa_wdi_init_per_inst - Client should call this function to
* init WDI IPA offload data path
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_init_per_inst(struct ipa_wdi_init_in_params *in,
struct ipa_wdi_init_out_params *out);
/**
* ipa_wdi_cleanup - Client should call this function to
* clean up WDI IPA offload data path
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_cleanup(void);
/**
* ipa_wdi_cleanup_per_inst - Client should call this function to
* clean up WDI IPA offload data path
*
* @hdl: hdl to wdi client
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_cleanup_per_inst(ipa_wdi_hdl_t hdl);
/**
* ipa_wdi_reg_intf - Client should call this function to
* register interface
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_reg_intf(
struct ipa_wdi_reg_intf_in_params *in);
/**
* ipa_wdi_reg_intf_per_inst - Client should call this function to
* register interface
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_reg_intf_per_inst(
struct ipa_wdi_reg_intf_in_params *in);
/**
* ipa_wdi_dereg_intf - Client Driver should call this
* function to deregister before unload and after disconnect
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_dereg_intf(const char *netdev_name);
/**
* ipa_wdi_dereg_intf_per_inst - Client Driver should call this
* function to deregister before unload and after disconnect
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_dereg_intf_per_inst(const char *netdev_name, ipa_wdi_hdl_t hdl);
/**
* ipa_wdi_conn_pipes - Client should call this
* function to connect pipes
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out);
/**
* ipa_wdi_conn_pipes_per_inst - Client should call this
* function to connect pipes
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wdi_conn_pipes_per_inst(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out);
/**
* ipa_wdi_disconn_pipes() - Client should call this
* function to disconnect pipes
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_disconn_pipes(void);
/**
* ipa_wdi_disconn_pipes_per_inst() - Client should call this
* function to disconnect pipes
*
* @hdl: hdl to wdi client
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_disconn_pipes_per_inst(ipa_wdi_hdl_t hdl);
/**
* ipa_wdi_enable_pipes() - Client should call this
* function to enable IPA offload data path
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_enable_pipes(void);
/**
* ipa_wdi_enable_pipes_per_inst() - Client should call this
* function to enable IPA offload data path
*
* @hdl: hdl to wdi client
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_enable_pipes_per_inst(ipa_wdi_hdl_t hdl);
/**
* ipa_wdi_disable_pipes() - Client should call this
* function to disable IPA offload data path
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_disable_pipes(void);
/**
* ipa_wdi_disable_pipes_per_inst() - Client should call this
* function to disable IPA offload data path
*
* @hdl: hdl to wdi client
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_disable_pipes_per_inst(ipa_wdi_hdl_t hdl);
/**
* ipa_wdi_set_perf_profile() - Client should call this function to
* set IPA clock bandwidth based on data rates
*
* @profile: [in] BandWidth profile to use
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile);
/**
* ipa_wdi_set_perf_profile_per_inst() - Client should call this function to
* set IPA clock bandwidth based on data rates
*
* @hdl: hdl to wdi client
* @profile: [in] BandWidth profile to use
*
* Returns: 0 on success, negative on failure
*/
int ipa_wdi_set_perf_profile_per_inst(ipa_wdi_hdl_t hdl,
struct ipa_wdi_perf_profile *profile);
/**
* ipa_wdi_create_smmu_mapping() - Create smmu mapping
*
* @num_buffers: number of buffers
*
* @info: wdi buffer info
*/
int ipa_wdi_create_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info);
/**
* ipa_wdi_create_smmu_mapping_per_inst() - Create smmu mapping
*
* @hdl: hdl to wdi client
* @num_buffers: number of buffers
* @info: wdi buffer info
*/
int ipa_wdi_create_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
u32 num_buffers,
struct ipa_wdi_buffer_info *info);
/**
* ipa_wdi_release_smmu_mapping() - Release smmu mapping
*
* @num_buffers: number of buffers
*
* @info: wdi buffer info
*/
int ipa_wdi_release_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info);
/**
* ipa_wdi_release_smmu_mapping_per_inst() - Release smmu mapping
*
* @hdl: hdl to wdi client
* @num_buffers: number of buffers
*
* @info: wdi buffer info
*/
int ipa_wdi_release_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
u32 num_buffers,
struct ipa_wdi_buffer_info *info);
/**
* ipa_wdi_get_stats() - Query WDI statistics
* @stats: [inout] stats blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats);
/**
* ipa_wdi_bw_monitor() - set wdi BW monitoring
* @info: [inout] info blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info);
/**
* ipa_wdi_sw_stats() - set wdi BW monitoring
* @info: [inout] info blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info);
#else /* IS_ENABLED(CONFIG_IPA3) */
/**
* ipa_wdi_get_capabilities - Client should call this function to
* know the WDI capabilities
*
* Note: Should not be called from atomic context and only
* after checking IPA readiness using ipa_register_ipa_ready_cb()
*
* @Return 0 on success, negative on failure
*/
static inline int ipa_wdi_get_capabilities(
struct ipa_wdi_capabilities_out_params *out)
{
return -EPERM;
}
static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
struct ipa_wdi_init_out_params *out)
{
return -EPERM;
}
static inline int ipa_wdi_init_per_inst(
struct ipa_wdi_init_in_params *in,
struct ipa_wdi_init_out_params *out)
{
return -EPERM;
}
static inline int ipa_get_wdi_version(void)
{
return -EPERM;
}
static inline int ipa_wdi_is_tx1_used(void)
{
return -EPERM;
}
static inline int ipa_wdi_cleanup(void)
{
return -EPERM;
}
static inline int ipa_wdi_cleanup_per_inst(ipa_wdi_hdl_t hdl)
{
return -EPERM;
}
static inline int ipa_wdi_reg_intf(
struct ipa_wdi_reg_intf_in_params *in)
{
return -EPERM;
}
static inline int ipa_wdi_reg_intf_per_inst(
struct ipa_wdi_reg_intf_in_params *in)
{
return -EPERM;
}
static inline int ipa_wdi_dereg_intf(const char *netdev_name)
{
return -EPERM;
}
static inline int ipa_wdi_dereg_intf_per_inst(const char *netdev_name,
ipa_wdi_hdl_t hdl)
{
return -EPERM;
}
static inline int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wdi_conn_pipes_per_inst(
struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wdi_disconn_pipes(void)
{
return -EPERM;
}
static inline int ipa_wdi_disconn_pipes_per_inst(ipa_wdi_hdl_t hdl)
{
return -EPERM;
}
static inline int ipa_wdi_enable_pipes(void)
{
return -EPERM;
}
static inline int ipa_wdi_enable_pipes_per_inst(ipa_wdi_hdl_t hdl)
{
return -EPERM;
}
static inline int ipa_wdi_disable_pipes(void)
{
return -EPERM;
}
static inline int ipa_wdi_disable_pipes_per_inst(ipa_wdi_hdl_t hdl)
{
return -EPERM;
}
static inline int ipa_wdi_set_perf_profile(
struct ipa_wdi_perf_profile *profile)
{
return -EPERM;
}
static inline int ipa_wdi_set_perf_profile_per_inst(
ipa_wdi_hdl_t hdl,
struct ipa_wdi_perf_profile *profile)
{
return -EPERM;
}
static inline int ipa_wdi_create_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return -EPERM;
}
static inline int ipa_wdi_create_smmu_mapping_per_inst(
ipa_wdi_hdl_t hdl,
u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return -EPERM;
}
static inline int ipa_wdi_release_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return -EPERM;
}
static inline int ipa_wdi_release_smmu_mapping_per_inst(
ipa_wdi_hdl_t hdl,
u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return -EPERM;
}
static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
{
return -EPERM;
}
static inline int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
{
return -EPERM;
}
static inline int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
{
return -EPERM;
}
#endif /* IS_ENABLED(CONFIG_IPA3) */
#endif /* _IPA_WDI3_H_ */
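/*
 * Illustrative usage sketch (not part of the header above): per-instance WDI
 * bring-up order for a WLAN driver.  my_uc_ready_cb is a hypothetical
 * ipa_uc_ready_cb implementation, the header and ring details are omitted,
 * and IPA_IP_v4/IPA_IP_v6 and IPA_HDR_L2_ETHERNET_II are assumed from
 * msm_ipa.h.
 */
static int my_wdi_attach(const char *netdev_name)
{
    struct ipa_wdi_init_in_params init_in = {
        .wdi_version = IPA_WDI_3,
        .notify = my_uc_ready_cb,
        .priv = NULL,
        .inst_id = 0,
    };
    struct ipa_wdi_init_out_params init_out;
    struct ipa_wdi_reg_intf_in_params reg_in = { 0 };
    int ret;

    ret = ipa_wdi_init_per_inst(&init_in, &init_out);
    if (ret)
        return ret;

    reg_in.netdev_name = netdev_name;
    reg_in.hdl = init_out.hdl;
    reg_in.hdr_info[IPA_IP_v4].hdr_type = IPA_HDR_L2_ETHERNET_II;
    reg_in.hdr_info[IPA_IP_v6].hdr_type = IPA_HDR_L2_ETHERNET_II;

    ret = ipa_wdi_reg_intf_per_inst(&reg_in);
    if (ret) {
        ipa_wdi_cleanup_per_inst(init_out.hdl);
        return ret;
    }

    /* next: fill struct ipa_wdi_conn_in_params (tx/rx rings), then call
     * ipa_wdi_conn_pipes_per_inst() and ipa_wdi_enable_pipes_per_inst()
     */
    return 0;
}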


@@ -0,0 +1,487 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_WIGIG_H_
#define _IPA_WIGIG_H_
#include <linux/msm_ipa.h>
#include "ipa.h"
typedef void (*ipa_wigig_misc_int_cb)(void *priv);
/*
* struct ipa_wigig_init_in_params - wigig init input parameters
*
* @periph_baddr_pa: physical address of wigig HW base
* @pseudo_cause_pa: physical address of wigig HW pseudo_cause register
* @int_gen_tx_pa: physical address of wigig HW int_gen_tx register
* @int_gen_rx_pa: physical address of wigig HW int_gen_rx register
* @dma_ep_misc_pa: physical address of wigig HW dma_ep_misc register
* @notify: uc ready callback
* @int_notify: wigig misc interrupt callback
* @priv: uc ready callback cookie
*/
struct ipa_wigig_init_in_params {
phys_addr_t periph_baddr_pa;
phys_addr_t pseudo_cause_pa;
phys_addr_t int_gen_tx_pa;
phys_addr_t int_gen_rx_pa;
phys_addr_t dma_ep_misc_pa;
ipa_uc_ready_cb notify;
ipa_wigig_misc_int_cb int_notify;
void *priv;
};
/*
* struct ipa_wigig_init_out_params - wigig init output parameters
*
* @is_uc_ready: is uC ready. No API should be called until uC is ready.
* @uc_db_pa: physical address of IPA uC doorbell
* @lan_rx_napi_enable: if we use NAPI in the LAN rx
*/
struct ipa_wigig_init_out_params {
bool is_uc_ready;
phys_addr_t uc_db_pa;
bool lan_rx_napi_enable;
};
/*
* struct ipa_wigig_hdr_info - Header to install on IPA HW
*
* @hdr: header to install on IPA HW
* @hdr_len: length of header
* @dst_mac_addr_offset: destination mac address offset
* @hdr_type: layer two header type
*/
struct ipa_wigig_hdr_info {
u8 *hdr;
u8 hdr_len;
u8 dst_mac_addr_offset;
enum ipa_hdr_l2_type hdr_type;
};
/*
* struct ipa_wigig_reg_intf_in_params - parameters for offload interface
* registration
*
* @netdev_name: network interface name
* @netdev_mac: netdev mac address
* @hdr_info: header information
*/
struct ipa_wigig_reg_intf_in_params {
const char *netdev_name;
u8 netdev_mac[IPA_MAC_ADDR_SIZE];
struct ipa_wigig_hdr_info hdr_info[IPA_IP_MAX];
};
/*
* struct ipa_wigig_pipe_setup_info - WIGIG TX/Rx configuration
* @desc_ring_base_pa: physical address of the base of the descriptor ring
* @desc_ring_size: size of the descriptor ring in bytes
* @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
* @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
* @status_ring_base_pa: physical address of the base of the status ring
* @status_ring_size: status ring size in bytes
* @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
* @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
*/
struct ipa_wigig_pipe_setup_info {
phys_addr_t desc_ring_base_pa;
u16 desc_ring_size;
phys_addr_t desc_ring_HWHEAD_pa;
phys_addr_t desc_ring_HWTAIL_pa;
phys_addr_t status_ring_base_pa;
u16 status_ring_size;
phys_addr_t status_ring_HWHEAD_pa;
phys_addr_t status_ring_HWTAIL_pa;
};
/*
* struct ipa_wigig_pipe_setup_info_smmu - WIGIG TX/Rx configuration smmu mode
* @desc_ring_base: sg_table of the base of the descriptor ring
* @desc_ring_base_iova: IO virtual address mapped to physical base address
* @desc_ring_size: size of the descriptor ring in bytes
* @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
* @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
* @status_ring_base: sg_table of the base of the status ring
* @status_ring_base_iova: IO virtual address mapped to physical base address
* @status_ring_size: status ring size in bytes
* @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
* @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
*/
struct ipa_wigig_pipe_setup_info_smmu {
struct sg_table desc_ring_base;
u64 desc_ring_base_iova;
u16 desc_ring_size;
phys_addr_t desc_ring_HWHEAD_pa;
phys_addr_t desc_ring_HWTAIL_pa;
struct sg_table status_ring_base;
u64 status_ring_base_iova;
u16 status_ring_size;
phys_addr_t status_ring_HWHEAD_pa;
phys_addr_t status_ring_HWTAIL_pa;
};
/*
* struct ipa_wigig_rx_pipe_data_buffer_info - WIGIG Rx data buffer
* configuration
* @data_buffer_base_pa: physical address of the physically contiguous
* Rx data buffer
* @data_buffer_size: size of the data buffer
*/
struct ipa_wigig_rx_pipe_data_buffer_info {
phys_addr_t data_buffer_base_pa;
u32 data_buffer_size;
};
/*
* struct ipa_wigig_rx_pipe_data_buffer_info_smmu - WIGIG Rx data buffer
* configuration smmu mode
* @data_buffer_base: sg_table of the physically contiguous
* Rx data buffer
* @data_buffer_base_iova: IO virtual address mapped to physical base address
* @data_buffer_size: size of the data buffer
*/
struct ipa_wigig_rx_pipe_data_buffer_info_smmu {
struct sg_table data_buffer_base;
u64 data_buffer_base_iova;
u32 data_buffer_size;
};
/*
* struct ipa_wigig_conn_rx_in_params - information provided by
* WIGIG offload client for Rx pipe
* @notify: client callback function
* @priv: client cookie
* @pipe: parameters to connect Rx pipe (WIGIG to IPA)
* @dbuff: Rx data buffer info
*/
struct ipa_wigig_conn_rx_in_params {
ipa_notify_cb notify;
void *priv;
struct ipa_wigig_pipe_setup_info pipe;
struct ipa_wigig_rx_pipe_data_buffer_info dbuff;
};
/*
* struct ipa_wigig_conn_rx_in_params_smmu - information provided by
* WIGIG offload client for Rx pipe
* @notify: client callback function
* @priv: client cookie
* @pipe_smmu: parameters to connect Rx pipe (WIGIG to IPA) smmu mode
* @dbuff_smmu: Rx data buffer info smmu mode
*/
struct ipa_wigig_conn_rx_in_params_smmu {
ipa_notify_cb notify;
void *priv;
struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
struct ipa_wigig_rx_pipe_data_buffer_info_smmu dbuff_smmu;
};
/*
* struct ipa_wigig_conn_out_params - information provided
* to WIGIG driver
* @client: client type allocated by IPA driver
*/
struct ipa_wigig_conn_out_params {
enum ipa_client_type client;
};
/*
* struct ipa_wigig_tx_pipe_data_buffer_info - WIGIG Tx data buffer
* configuration
* @data_buffer_size: size of a single data buffer
*/
struct ipa_wigig_tx_pipe_data_buffer_info {
u32 data_buffer_size;
};
/*
* struct ipa_wigig_tx_pipe_data_buffer_info_smmu - WIGIG Tx data buffer
* configuration smmu mode
* @data_buffer_base: sg_tables of the Tx data buffers
* @data_buffer_base_iova: IO virtual address mapped to physical base address
* @num_buffers: number of buffers
* @data_buffer_size: size of a single data buffer
*/
struct ipa_wigig_tx_pipe_data_buffer_info_smmu {
struct sg_table *data_buffer_base;
u64 *data_buffer_base_iova;
u32 num_buffers;
u32 data_buffer_size;
};
/*
* struct ipa_wigig_conn_tx_in_params - information provided by
* wigig offload client for Tx pipe
* @pipe: parameters to connect Tx pipe (IPA to WIGIG)
* @dbuff: Tx data buffer info
* @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
* @client_mac: MAC address of client to be connected
*/
struct ipa_wigig_conn_tx_in_params {
struct ipa_wigig_pipe_setup_info pipe;
struct ipa_wigig_tx_pipe_data_buffer_info dbuff;
u8 int_gen_tx_bit_num;
u8 client_mac[IPA_MAC_ADDR_SIZE];
};
/*
* struct ipa_wigig_conn_tx_in_params_smmu - information provided by
* wigig offload client for Tx pipe
* @pipe_smmu: parameters to connect Tx pipe (IPA to WIGIG) smmu mode
* @dbuff_smmu: Tx data buffer info smmu mode
* @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
* @client_mac: MAC address of client to be connected
*/
struct ipa_wigig_conn_tx_in_params_smmu {
struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
struct ipa_wigig_tx_pipe_data_buffer_info_smmu dbuff_smmu;
u8 int_gen_tx_bit_num;
u8 client_mac[IPA_MAC_ADDR_SIZE];
};
#if IS_ENABLED(CONFIG_IPA3)
/*
* ipa_wigig_init - Client should call this function to
* init WIGIG IPA offload data path
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
struct ipa_wigig_init_out_params *out);
/*
* ipa_wigig_cleanup - Client should call this function to
* clean up WIGIG IPA offload data path
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_cleanup(void);
/*
* ipa_wigig_is_smmu_enabled - get smmu state
*
* @Return true if smmu is enabled, false if disabled
*/
bool ipa_wigig_is_smmu_enabled(void);
/*
* ipa_wigig_reg_intf - Client should call this function to
* register interface
*
* Note: Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in);
/*
* ipa_wigig_dereg_intf - Client Driver should call this
* function to deregister before unload and after disconnect
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_dereg_intf(const char *netdev_name);
/*
* ipa_wigig_conn_rx_pipe - Client should call this
* function to connect the rx (UL) pipe
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Non SMMU mode only, Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
struct ipa_wigig_conn_out_params *out);
/*
* ipa_wigig_conn_rx_pipe_smmu - Client should call this
* function to connect the rx (UL) pipe
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: SMMU mode only, Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_conn_rx_pipe_smmu(struct ipa_wigig_conn_rx_in_params_smmu *in,
struct ipa_wigig_conn_out_params *out);
/*
* ipa_wigig_conn_client - Client should call this
* function to connect one of the tx (DL) pipes when a WIGIG client connects
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: Non SMMU mode only, Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
struct ipa_wigig_conn_out_params *out);
/*
* ipa_wigig_conn_client_smmu - Client should call this
* function to connect one of the tx (DL) pipes when a WIGIG client connects
*
* @in: [in] input parameters from client
* @out: [out] output params to client
*
* Note: SMMU mode only, Should not be called from atomic context
*
* @Return 0 on success, negative on failure
*/
int ipa_wigig_conn_client_smmu(struct ipa_wigig_conn_tx_in_params_smmu *in,
struct ipa_wigig_conn_out_params *out);
/*
* ipa_wigig_disconn_pipe() - Client should call this
* function to disconnect a pipe
*
* @client: [in] pipe to be disconnected
*
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wigig_disconn_pipe(enum ipa_client_type client);
/*
* ipa_wigig_enable_pipe() - Client should call this
* function to enable IPA offload data path
*
* @client: [in] pipe to be enabled
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wigig_enable_pipe(enum ipa_client_type client);
/*
* ipa_wigig_disable_pipe() - Client should call this
* function to disable IPA offload data path
*
* @client: [in] pipe to be disabled
* Note: Should not be called from atomic context
*
* Returns: 0 on success, negative on failure
*/
int ipa_wigig_disable_pipe(enum ipa_client_type client);
/*
* ipa_wigig_tx_dp() - transmit tx packet through IPA to 11ad HW
*
* @dst: [in] destination ipa client pipe to be used
* @skb: [in] skb to be transmitted
*
* Returns: 0 on success, negative on failure
*/
int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb);
/**
* ipa_wigig_set_perf_profile() - Client should call this function to
* set IPA clock bandwidth based on data rates
*
* @max_supported_bw_mbps: [in] maximum bandwidth needed (in Mbps)
*
* Returns: 0 on success, negative on failure
*/
int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps);
#else /* IS_ENABLED(CONFIG_IPA3) */
static inline int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
struct ipa_wigig_init_out_params *out)
{
return -EPERM;
}
static inline int ipa_wigig_cleanup(void)
{
return -EPERM;
}
static inline bool ipa_wigig_is_smmu_enabled(void)
{
return false;
}
static inline int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in)
{
return -EPERM;
}
static inline int ipa_wigig_dereg_intf(const char *netdev_name)
{
return -EPERM;
}
static inline int ipa_wigig_conn_rx_pipe(
struct ipa_wigig_conn_rx_in_params *in,
struct ipa_wigig_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wigig_conn_rx_pipe_smmu(
struct ipa_wigig_conn_rx_in_params_smmu *in,
struct ipa_wigig_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wigig_conn_client(
struct ipa_wigig_conn_tx_in_params *in,
struct ipa_wigig_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wigig_conn_client_smmu(
struct ipa_wigig_conn_tx_in_params_smmu *in,
struct ipa_wigig_conn_out_params *out)
{
return -EPERM;
}
static inline int ipa_wigig_disconn_pipe(enum ipa_client_type client)
{
return -EPERM;
}
static inline int ipa_wigig_enable_pipe(enum ipa_client_type client)
{
return -EPERM;
}
static inline int ipa_wigig_disable_pipe(enum ipa_client_type client)
{
return -EPERM;
}
static inline int ipa_wigig_tx_dp(enum ipa_client_type dst,
struct sk_buff *skb)
{
return -EPERM;
}
static inline int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
{
return -EPERM;
}
#endif /* IS_ENABLED(CONFIG_IPA3) */
#endif /* _IPA_WIGIG_H_ */
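/*
 * Illustrative usage sketch (not part of the header above): non-SMMU WIGIG
 * bring-up.  my_uc_ready_cb, my_misc_int_cb and my_rx_notify are hypothetical
 * callbacks and all register addresses are placeholders; the descriptor and
 * status ring details come from the 11ad HW.
 */
static int my_wigig_attach(void)
{
    struct ipa_wigig_init_in_params init_in = {
        .periph_baddr_pa = 0x18800000, /* placeholder */
        .pseudo_cause_pa = 0x188002d8, /* placeholder */
        .int_gen_tx_pa = 0x188002dc, /* placeholder */
        .int_gen_rx_pa = 0x188002e0, /* placeholder */
        .dma_ep_misc_pa = 0x188002e4, /* placeholder */
        .notify = my_uc_ready_cb,
        .int_notify = my_misc_int_cb,
        .priv = NULL,
    };
    struct ipa_wigig_init_out_params init_out;
    struct ipa_wigig_conn_rx_in_params rx_in = { 0 };
    struct ipa_wigig_conn_out_params conn_out;
    int ret;

    ret = ipa_wigig_init(&init_in, &init_out);
    if (ret)
        return ret;
    if (!init_out.is_uc_ready)
        return 0; /* wait for my_uc_ready_cb() before connecting */

    rx_in.notify = my_rx_notify;
    rx_in.priv = NULL;
    /* fill rx_in.pipe and rx_in.dbuff from the 11ad HW rings here */

    return ipa_wigig_conn_rx_pipe(&rx_in, &conn_out);
}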


@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#ifndef MSM_GSI_H
#define MSM_GSI_H
#include <linux/types.h>
#include <linux/interrupt.h>
enum gsi_chan_dir {
GSI_CHAN_DIR_FROM_GSI = 0x0,
GSI_CHAN_DIR_TO_GSI = 0x1
};
/**
* enum gsi_prefetch_mode - GSI channel prefetch mode
*
* @GSI_USE_PREFETCH_BUFS: Channel will use normal prefetch buffers if possible
* @GSI_ESCAPE_BUF_ONLY: Channel will always use escape buffers only
* @GSI_SMART_PRE_FETCH: Channel will work in smart prefetch mode.
* relevant starting GSI 2.5
* @GSI_FREE_PRE_FETCH: Channel will work in free prefetch mode.
* relevant starting GSI 2.5
*/
enum gsi_prefetch_mode {
GSI_USE_PREFETCH_BUFS = 0x0,
GSI_ESCAPE_BUF_ONLY = 0x1,
GSI_SMART_PRE_FETCH = 0x2,
GSI_FREE_PRE_FETCH = 0x3,
};
#endif /* MSM_GSI_H */