msm: ipa4: rmnet_ctl low latency pipe support

Add support for two new pipes carrying QMAP flow control and low latency
traffic from the rmnet_ctl module.

Change-Id: Iae11e742f1f3d1d3ec7b21b02426b0164dcb13c5
Signed-off-by: Bojun Pan <bojunp@codeaurora.org>
Author: Bojun Pan <bojunp@codeaurora.org>
Committed: 2020-03-27 17:04:36 -07:00 (via Gerrit Code Review)
Parent: 62f45adb3d
Commit: c8a055ea6c
11 changed files with 989 additions and 47 deletions
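
For orientation, a minimal client-side sketch of how a module such as rmnet_ctl might consume the API exported by this change. The callback bodies, wrapper names, and the <linux/ipa.h> header location are illustrative assumptions; only ipa_register_rmnet_ctl_cb(), ipa_unregister_rmnet_ctl_cb(), and ipa_rmnet_ctl_xmit() come from the patch itself.

```c
/* Hypothetical client-side usage sketch; not part of this change. */
#include <linux/ipa.h>      /* assumed to declare the rmnet_ctl IPA API */
#include <linux/skbuff.h>

static void my_rmnet_ctl_ready(void *user_data)
{
	/* low latency pipes are set up; transmission may start */
}

static void my_rmnet_ctl_stop(void *user_data)
{
	/* pipes are going down; stop queueing control frames */
}

static void my_rmnet_ctl_rx(void *user_data, void *rx_data)
{
	struct sk_buff *skb = rx_data;

	/* consume the received QMAP flow control frame */
	consume_skb(skb);
}

static int my_client_init(void)
{
	return ipa_register_rmnet_ctl_cb(my_rmnet_ctl_ready, NULL,
					 my_rmnet_ctl_stop, NULL,
					 my_rmnet_ctl_rx, NULL);
}

static int my_client_send(struct sk_buff *skb)
{
	/* transmits on IPA_CLIENT_APPS_WAN_LOW_LAT_PROD */
	return ipa_rmnet_ctl_xmit(skb);
}
```

On teardown the client would call ipa_unregister_rmnet_ctl_cb() to clear the callbacks.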


@@ -38,7 +38,7 @@ ipam-y += \
ipa_clients/ipa_wigig.o
ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
ipa_v3/ipa_qmi_service.o \
ipa_v3/ipa_qmi_service.o ipa_v3/rmnet_ctl_ipa.o \
ipa_v3/rmnet_ipa_fd_ioctl.o
ipam-$(CONFIG_IPA3_MHI_PROXY) += ipa_v3/ipa_mhi_proxy.o


@@ -207,8 +207,11 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
__stringify(RESERVERD_CONS_101),
__stringify(IPA_CLIENT_AQC_ETHERNET_PROD),
__stringify(IPA_CLIENT_AQC_ETHERNET_CONS),
__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD),
__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS),
};
/**
@@ -3194,6 +3197,47 @@ static int ipa_ap_resume(struct device *dev)
return ret;
}
int ipa_register_rmnet_ctl_cb(
void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
void *user_data1,
void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
void *user_data2,
void (*ipa_rmnet_ctl_rx_notify_cb)(
void *user_data3, void *rx_data),
void *user_data3)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_register_rmnet_ctl_cb,
ipa_rmnet_ctl_ready_cb, user_data1,
ipa_rmnet_ctl_stop_cb, user_data2,
ipa_rmnet_ctl_rx_notify_cb, user_data3);
return ret;
}
EXPORT_SYMBOL(ipa_register_rmnet_ctl_cb);
int ipa_unregister_rmnet_ctl_cb(void)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_unregister_rmnet_ctl_cb);
return ret;
}
EXPORT_SYMBOL(ipa_unregister_rmnet_ctl_cb);
int ipa_rmnet_ctl_xmit(struct sk_buff *skb)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_rmnet_ctl_xmit,
skb);
return ret;
}
EXPORT_SYMBOL(ipa_rmnet_ctl_xmit);
/**
* ipa_inc_client_enable_clks() - Increase active clients counter, and
* enable ipa clocks if necessary


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_mhi.h>
@@ -369,6 +369,19 @@ struct ipa_api_controller {
const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)
(enum ipa_client_type client);
int (*ipa_register_rmnet_ctl_cb)(
void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
void *user_data1,
void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
void *user_data2,
void (*ipa_rmnet_ctl_rx_notify_cb)(
void *user_data3, void *rx_data),
void *user_data3);
int (*ipa_unregister_rmnet_ctl_cb)(void);
int (*ipa_rmnet_ctl_xmit)(struct sk_buff *skb);
void (*ipa_inc_client_enable_clks)(
struct ipa_active_client_logging_info *id);


@@ -6658,6 +6658,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->do_ram_collection_on_crash =
resource_p->do_ram_collection_on_crash;
ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
if (ipa3_ctx->secure_debug_check_action == USE_SCM) {
if (ipa_is_mem_dump_allowed())
@@ -7090,6 +7091,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_wwan_init();
ipa3_rmnet_ctl_init();
mutex_init(&ipa3_ctx->app_clock_vote.mutex);
return 0;
@@ -7529,6 +7532,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->lan_rx_napi_enable
? "True" : "False");
ipa_drv_res->rmnet_ctl_enable =
of_property_read_bool(pdev->dev.of_node,
"qcom,rmnet-ctl-enable");
IPADBG(": Enable rmnet ctl = %s\n",
ipa_drv_res->rmnet_ctl_enable
? "True" : "False");
/* Get IPA wrapper address */
resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"ipa-base");


@@ -37,7 +37,7 @@
#define IPA_GSB_AGGR_BYTE_LIMIT 14
#define IPA_GSB_RX_BUFF_BASE_SZ 16384
#define IPA_QMAP_RX_BUFF_BASE_SZ 512
#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
(X) + NET_SKB_PAD) +\
@@ -131,6 +131,7 @@ static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
int *actual_num);
static unsigned long tag_to_pointer_wa(uint64_t tag);
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
static void ipa3_tasklet_rx_notify(unsigned long data);
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
@@ -230,7 +231,6 @@ static void ipa3_tasklet_write_done(unsigned long data)
spin_unlock_bh(&sys->spinlock);
}
static void ipa3_send_nop_desc(struct work_struct *work)
{
struct ipa3_sys_context *sys = container_of(work,
@@ -873,7 +873,7 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
if (sys->napi_obj) {
if (sys->napi_obj || IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
/* interrupt mode is done in ipa3_rx_poll context */
ipa_assert();
} else
@@ -926,6 +926,11 @@ static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
usleep_range(SUSPEND_MIN_SLEEP_RX,
SUSPEND_MAX_SLEEP_RX);
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_COAL");
} else if (sys->ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
IPA_ACTIVE_CLIENTS_INC_SPECIAL("PIPE_SUSPEND_LOW_LAT");
usleep_range(SUSPEND_MIN_SLEEP_RX,
SUSPEND_MAX_SLEEP_RX);
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PIPE_SUSPEND_LOW_LAT");
} else
IPAERR("Unexpected event %d\n for client %d\n",
event, sys->ep->client);
@@ -1074,8 +1079,12 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
atomic_set(&ep->sys->xmit_eot_cnt, 0);
tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
(unsigned long) ep->sys);
if (IPA_CLIENT_IS_PROD(sys_in->client))
tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
(unsigned long) ep->sys);
if (sys_in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
tasklet_init(&ep->sys->tasklet, ipa3_tasklet_rx_notify,
(unsigned long) ep->sys);
ep->skip_ep_cfg = sys_in->skip_ep_cfg;
if (ipa3_assign_policy(sys_in, ep->sys)) {
IPAERR("failed to sys ctx for client %d\n", sys_in->client);
@@ -1780,6 +1789,9 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
if (sys->napi_obj) {
ipa_pm_activate_sync(sys->pm_hdl);
napi_schedule(sys->napi_obj);
} else if (IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
ipa_pm_activate_sync(sys->pm_hdl);
tasklet_schedule(&sys->tasklet);
} else
ipa3_handle_rx(sys);
}
@@ -3034,6 +3046,28 @@ static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
}
}
static int ipa3_low_lat_rx_pyld_hdlr(struct sk_buff *skb,
struct ipa3_sys_context *sys)
{
if (skb->len == 0) {
IPAERR("ZLT\n");
goto bail;
}
IPA_DUMP_BUFF(skb->data, 0, skb->len);
if (!sys->ep->client_notify) {
IPAERR("client_notify is NULL");
goto bail;
}
sys->ep->client_notify(sys->ep->priv,
IPA_RECEIVE, (unsigned long)(skb));
return 0;
bail:
sys->free_skb(skb);
return 0;
}
static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
struct ipa3_sys_context *sys)
{
@@ -3753,7 +3787,8 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
} else {
if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
in->client == IPA_CLIENT_APPS_WAN_CONS ||
in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
in->client == IPA_CLIENT_APPS_WAN_COAL_CONS ||
in->client == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
sys->ep->status.status_en = true;
sys->policy = IPA_POLICY_INTR_POLL_MODE;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
@@ -3828,6 +3863,21 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
in->ipa_ep_cfg.aggr.aggr_pkt_limit
= IPA_GENERIC_AGGR_PKT_LIMIT;
}
} else if (in->client ==
IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) {
INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
sys->ep->status.status_en = false;
in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
in->ipa_ep_cfg.aggr.aggr_time_limit = 0;
sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
IPA_QMAP_RX_BUFF_BASE_SZ);
sys->pyld_hdlr = ipa3_low_lat_rx_pyld_hdlr;
sys->repl_hdlr =
ipa3_fast_replenish_rx_cache;
sys->free_rx_wrapper =
ipa3_free_rx_wrapper;
sys->rx_pool_sz =
ipa3_ctx->wan_rx_ring_size;
}
} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
IPADBG("assigning policy to client:%d",
@@ -4371,15 +4421,14 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
* pm deactivate is done in wq context
* or after NAPI poll
*/
clk_off = ipa_pm_activate(sys->pm_hdl);
if (!clk_off && sys->napi_obj) {
if (!clk_off && sys->napi_obj)
napi_schedule(sys->napi_obj);
return;
}
queue_work(sys->wq, &sys->work);
return;
else if (!clk_off &&
IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
tasklet_schedule(&sys->tasklet);
} else
queue_work(sys->wq, &sys->work);
}
static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
@@ -5153,3 +5202,31 @@ static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
aggr_byte_limit++;
return aggr_byte_limit >> 1;
}
static void ipa3_tasklet_rx_notify(unsigned long data)
{
struct ipa3_sys_context *sys;
struct sk_buff *rx_skb;
struct gsi_chan_xfer_notify notify;
int ret;
sys = (struct ipa3_sys_context *)data;
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
start_poll:
while (1) {
ret = ipa_poll_gsi_pkt(sys, &notify);
if (ret)
break;
rx_skb = handle_skb_completion(&notify, true);
if (rx_skb) {
sys->pyld_hdlr(rx_skb, sys);
sys->repl_hdlr(sys);
}
}
ret = ipa3_rx_switch_to_intr_mode(sys);
if (ret == -GSI_STATUS_PENDING_IRQ)
goto start_poll;
ipa_pm_deferred_deactivate(sys->pm_hdl);
}


@@ -1881,6 +1881,7 @@ struct ipa3_app_clock_vote {
* @icc_num_paths - number of paths icc would vote for bw
* @icc_clk - table for icc bw clock value
* @coal_cmd_pyld: holds the coalescing close frame command payload
* @rmnet_ctl_enable: enable pipe support for low latency data
*/
struct ipa3_context {
struct ipa3_char_device_context cdev;
@@ -2061,6 +2062,7 @@ struct ipa3_context {
struct ipahal_imm_cmd_pyld *coal_cmd_pyld;
struct ipa3_app_clock_vote app_clock_vote;
bool clients_registered;
bool rmnet_ctl_enable;
};
struct ipa3_plat_drv_res {
@@ -2113,6 +2115,7 @@ struct ipa3_plat_drv_res {
u32 icc_num_paths;
const char *icc_path_name[IPA_ICC_PATH_MAX];
u32 icc_clk_val[IPA_ICC_LVL_MAX][IPA_ICC_MAX];
bool rmnet_ctl_enable;
};
/**
@@ -2839,6 +2842,8 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
bool ipa3_get_lan_rx_napi(void);
bool ipa3_get_qmap_pipe_enable(void);
/* internal functions */
int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
@@ -3130,6 +3135,21 @@ int emulator_load_fws(
u32 transport_mem_base,
u32 transport_mem_size,
enum gsi_ver);
int ipa3_rmnet_ctl_init(void);
int ipa3_register_rmnet_ctl_cb(
void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
void *user_data1,
void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
void *user_data2,
void (*ipa_rmnet_ctl_rx_notify_cb)(
void *user_data3, void *rx_data),
void *user_data3);
int ipa3_unregister_rmnet_ctl_cb(void);
int ipa3_rmnet_ctl_xmit(struct sk_buff *skb);
int ipa3_setup_apps_low_lat_prod_pipe(void);
int ipa3_setup_apps_low_lat_cons_pipe(void);
int ipa3_teardown_apps_low_lat_pipes(void);
void ipa3_rmnet_ctl_ready_notifier(void);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
int ipa_gsi_ch20_wa(void);
int ipa3_rx_poll(u32 clnt_hdl, int budget);


@@ -2234,7 +2234,7 @@ int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req)
resp.resp.error, "ipa_mhi_cleanup_req_msg");
}
int ipa3_qmi_send_rsc_pipe_indication(
int ipa3_qmi_send_wan_pipe_indication(
struct ipa_endp_desc_indication_msg_v01 *req)
{
IPAWANDBG("Sending QMI_IPA_ENDP_DESC_INDICATION_V01\n");


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*/
#ifndef IPA_QMI_SERVICE_H
@@ -334,7 +334,7 @@ int ipa3_qmi_get_per_client_packet_stats(
int ipa3_qmi_send_mhi_ready_indication(
struct ipa_mhi_ready_indication_msg_v01 *req);
int ipa3_qmi_send_rsc_pipe_indication(
int ipa3_qmi_send_wan_pipe_indication(
struct ipa_endp_desc_indication_msg_v01 *req);
int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req);
@@ -478,7 +478,7 @@ static inline int ipa3_qmi_send_mhi_ready_indication(
return -EPERM;
}
static inline int ipa3_qmi_send_rsc_pipe_indication(
static inline int ipa3_qmi_send_wan_pipe_indication(
struct ipa_endp_desc_indication_msg_v01 *req)
{
return -EPERM;


@@ -2930,6 +2930,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 2, 2, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
[IPA_4_9][IPA_CLIENT_APPS_WAN_LOW_LAT_PROD] = {
true, IPA_v4_9_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
{ 1, 1, 4, 4, IPA_EE_AP, GSI_SMART_PRE_FETCH, 1 } },
[IPA_4_9][IPA_CLIENT_WLAN2_PROD] = {
true, IPA_v4_9_GROUP_UL_DL,
true,
@@ -2986,6 +2992,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 20, 12, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_9][IPA_CLIENT_APPS_WAN_LOW_LAT_CONS] = {
true, IPA_v4_9_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 17, 9, 6, 6, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_9][IPA_CLIENT_USB_DPL_CONS] = {
true, IPA_v4_9_GROUP_UL_DL,
false,
@@ -7323,6 +7335,9 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
api_ctrl->ipa_start_gsi_channel = ipa3_start_gsi_channel;
api_ctrl->ipa_unregister_rmnet_ctl_cb = ipa3_unregister_rmnet_ctl_cb;
api_ctrl->ipa_register_rmnet_ctl_cb = ipa3_register_rmnet_ctl_cb;
api_ctrl->ipa_rmnet_ctl_xmit = ipa3_rmnet_ctl_xmit;
api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
api_ctrl->ipa_inc_client_enable_clks_no_block =
@@ -8166,6 +8181,11 @@ int ipa3_suspend_apps_pipes(bool suspend)
if (res == -EAGAIN)
goto undo_odl_cons;
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
suspend);
if (res == -EAGAIN)
goto undo_qmap_cons;
if (suspend) {
struct ipahal_reg_tx_wrapper tx;
int ep_idx;
@@ -8180,7 +8200,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
IPADBG("COAL frame is open 0x%x\n",
tx.coal_slave_open_frame);
res = -EAGAIN;
goto undo_odl_cons;
goto undo_qmap_cons;
}
usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
@@ -8189,25 +8209,32 @@ int ipa3_suspend_apps_pipes(bool suspend)
ipa3_ctx->ee);
if (res) {
IPADBG("suspend irq is pending 0x%x\n", res);
goto undo_odl_cons;
goto undo_qmap_cons;
}
}
do_prod:
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, suspend);
if (res == -EAGAIN)
goto undo_lan_prod;
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD,
suspend);
if (res == -EAGAIN)
goto undo_qmap_prod;
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, suspend);
if (res == -EAGAIN)
goto undo_wan_prod;
return 0;
undo_wan_prod:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_PROD, !suspend);
undo_qmap_prod:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD,
!suspend);
undo_lan_prod:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_PROD, !suspend);
undo_qmap_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
!suspend);
undo_odl_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
undo_lan_cons:


@@ -0,0 +1,670 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/ipa.h>
#include <uapi/linux/msm_rmnet.h>
#include "ipa_i.h"
enum ipa_rmnet_ctl_state {
IPA_RMNET_CTL_NOT_REG,
IPA_RMNET_CTL_REGD, /* rmnet_ctl register */
IPA_RMNET_CTL_PIPE_READY, /* sys pipe setup */
IPA_RMNET_CTL_START, /* rmnet_ctl register + pipe setup */
};
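/*
 * State transitions (see ipa3_register_rmnet_ctl_cb(),
 * ipa3_setup_apps_low_lat_cons_pipe(), ipa3_unregister_rmnet_ctl_cb()
 * and ipa3_teardown_apps_low_lat_pipes() below):
 *   NOT_REG    -> REGD        on callback registration
 *   NOT_REG    -> PIPE_READY  on cons pipe setup
 *   REGD       -> START       on cons pipe setup (ready_cb is invoked)
 *   PIPE_READY -> START       on callback registration (ready_cb is invoked)
 *   START -> PIPE_READY, REGD -> NOT_REG        on unregister
 *   START -> REGD, PIPE_READY -> NOT_REG        on pipe teardown (stop_cb is invoked)
 */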
#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
#define RMNET_CTRL_QUEUE_MAX (2 * IPA_WWAN_CONS_DESC_FIFO_SZ)
struct ipa3_rmnet_ctl_cb_info {
ipa_rmnet_ctl_ready_cb ready_cb;
ipa_rmnet_ctl_stop_cb stop_cb;
ipa_rmnet_ctl_rx_notify_cb rx_notify_cb;
void *ready_cb_user_data;
void *stop_cb_user_data;
void *rx_notify_cb_user_data;
};
struct ipa3_rmnet_ctl_stats {
atomic_t outstanding_pkts;
u32 tx_pkt_sent;
u32 rx_pkt_rcvd;
u64 tx_byte_sent;
u64 rx_byte_rcvd;
u32 tx_pkt_dropped;
u32 rx_pkt_dropped;
u64 tx_byte_dropped;
u64 rx_byte_dropped;
};
struct rmnet_ctl_ipa3_context {
struct ipa3_rmnet_ctl_stats stats;
enum ipa_rmnet_ctl_state state;
struct ipa_sys_connect_params apps_to_ipa_low_lat_ep_cfg;
struct ipa_sys_connect_params ipa_to_apps_low_lat_ep_cfg;
u32 apps_to_ipa3_low_lat_hdl;
u32 ipa3_to_apps_low_lat_hdl;
spinlock_t tx_lock;
struct ipa3_rmnet_ctl_cb_info cb_info;
struct sk_buff_head tx_queue;
u32 rmnet_ctl_pm_hdl;
struct mutex lock;
struct workqueue_struct *wq;
};
static struct rmnet_ctl_ipa3_context *rmnet_ctl_ipa3_ctx;
static void rmnet_ctl_wakeup_ipa(struct work_struct *work);
static DECLARE_DELAYED_WORK(rmnet_ctl_wakeup_work,
rmnet_ctl_wakeup_ipa);
static void apps_rmnet_ctl_tx_complete_notify(void *priv,
enum ipa_dp_evt_type evt, unsigned long data);
static void apps_rmnet_ctl_receive_notify(void *priv,
enum ipa_dp_evt_type evt, unsigned long data);
static int ipa3_rmnet_ctl_register_pm_client(void);
static void ipa3_rmnet_ctl_deregister_pm_client(void);
int ipa3_rmnet_ctl_init(void)
{
char buff[IPA_RESOURCE_NAME_MAX];
if (!ipa3_ctx) {
IPAERR("ipa3_ctx was not initialized\n");
return -EINVAL;
}
if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD) == -1 ||
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) == -1) {
IPAERR("invalid low lat endpoints\n");
return -EINVAL;
}
rmnet_ctl_ipa3_ctx = kzalloc(sizeof(*rmnet_ctl_ipa3_ctx),
GFP_KERNEL);
if (!rmnet_ctl_ipa3_ctx)
return -ENOMEM;
snprintf(buff, IPA_RESOURCE_NAME_MAX, "rmnet_ctlwq");
rmnet_ctl_ipa3_ctx->wq = alloc_workqueue(buff,
WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
if (!rmnet_ctl_ipa3_ctx->wq) {
kfree(rmnet_ctl_ipa3_ctx);
return -ENOMEM;
}
memset(&rmnet_ctl_ipa3_ctx->apps_to_ipa_low_lat_ep_cfg, 0,
sizeof(struct ipa_sys_connect_params));
memset(&rmnet_ctl_ipa3_ctx->ipa_to_apps_low_lat_ep_cfg, 0,
sizeof(struct ipa_sys_connect_params));
skb_queue_head_init(&rmnet_ctl_ipa3_ctx->tx_queue);
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_NOT_REG;
mutex_init(&rmnet_ctl_ipa3_ctx->lock);
return 0;
}
int ipa3_register_rmnet_ctl_cb(
void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
void *user_data1,
void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
void *user_data2,
void (*ipa_rmnet_ctl_rx_notify_cb)(
void *user_data3, void *rx_data),
void *user_data3)
{
/* check whether ipa3_ctx exists */
if (!ipa3_ctx) {
IPADBG("IPA driver hasn't been initialized\n");
return -EAGAIN;
}
if (!ipa3_ctx->rmnet_ctl_enable) {
IPAERR("low lat pipes are not supported");
return -ENXIO;
}
if (!rmnet_ctl_ipa3_ctx) {
IPADBG("rmnet_ctl_ctx haven't initialized\n");
return -EAGAIN;
}
mutex_lock(&rmnet_ctl_ipa3_ctx->lock);
if (rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_NOT_REG &&
rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_PIPE_READY) {
IPADBG("rmnet_ctl registered already\n", __func__);
mutex_unlock(&rmnet_ctl_ipa3_ctx->lock);
return -EEXIST;
}
rmnet_ctl_ipa3_ctx->cb_info.ready_cb = ipa_rmnet_ctl_ready_cb;
rmnet_ctl_ipa3_ctx->cb_info.ready_cb_user_data = user_data1;
rmnet_ctl_ipa3_ctx->cb_info.stop_cb = ipa_rmnet_ctl_stop_cb;
rmnet_ctl_ipa3_ctx->cb_info.stop_cb_user_data = user_data2;
rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb = ipa_rmnet_ctl_rx_notify_cb;
rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb_user_data = user_data3;
if (rmnet_ctl_ipa3_ctx->state == IPA_RMNET_CTL_NOT_REG) {
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_REGD;
} else {
(*ipa_rmnet_ctl_ready_cb)(user_data1);
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_START;
}
ipa3_rmnet_ctl_register_pm_client();
mutex_unlock(&rmnet_ctl_ipa3_ctx->lock);
IPADBG("rmnet_ctl registered successfually\n");
return 0;
}
int ipa3_unregister_rmnet_ctl_cb(void)
{
/* check whether ipa3_ctx exists */
if (!ipa3_ctx) {
IPADBG("IPA driver hasn't been initialized\n");
return -EAGAIN;
}
if (!ipa3_ctx->rmnet_ctl_enable) {
IPAERR("low lat pipe is disabled");
return -ENXIO;
}
if (!rmnet_ctl_ipa3_ctx) {
IPADBG("rmnet_ctl_ctx haven't initialized\n");
return -EAGAIN;
}
mutex_lock(&rmnet_ctl_ipa3_ctx->lock);
if (rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_REGD &&
rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_START) {
IPADBG("rmnet_ctl unregistered already\n", __func__);
mutex_unlock(&rmnet_ctl_ipa3_ctx->lock);
return 0;
}
rmnet_ctl_ipa3_ctx->cb_info.ready_cb = NULL;
rmnet_ctl_ipa3_ctx->cb_info.ready_cb_user_data = NULL;
rmnet_ctl_ipa3_ctx->cb_info.stop_cb = NULL;
rmnet_ctl_ipa3_ctx->cb_info.stop_cb_user_data = NULL;
rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb = NULL;
rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb_user_data = NULL;
if (rmnet_ctl_ipa3_ctx->state == IPA_RMNET_CTL_REGD)
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_NOT_REG;
else
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_PIPE_READY;
ipa3_rmnet_ctl_deregister_pm_client();
mutex_unlock(&rmnet_ctl_ipa3_ctx->lock);
IPADBG("rmnet_ctl unregistered successfually\n");
return 0;
}
int ipa3_setup_apps_low_lat_cons_pipe(void)
{
struct ipa_sys_connect_params *ipa_low_lat_ep_cfg;
int ret = 0;
int ep_idx;
if (!ipa3_ctx->rmnet_ctl_enable) {
IPAERR("low lat pipe is disabled");
return -ENXIO;
}
ep_idx = ipa_get_ep_mapping(
IPA_CLIENT_APPS_WAN_LOW_LAT_CONS);
if (ep_idx == IPA_EP_NOT_ALLOCATED) {
IPADBG("Low lat datapath not supported\n");
return -ENXIO;
}
if (rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_NOT_REG &&
rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_REGD) {
IPADBG("rmnet_ctl in bad state %d\n",
rmnet_ctl_ipa3_ctx->state);
return -ENXIO;
}
ipa_low_lat_ep_cfg =
&rmnet_ctl_ipa3_ctx->ipa_to_apps_low_lat_ep_cfg;
ipa_low_lat_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
IPA_ENABLE_CS_DL_QMAP;
ipa_low_lat_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid
= 1;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata
= 1;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid
= 1;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size
= 2;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid
= true;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad
= 0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding
= true;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset
= 0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian
= 0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask
= 0xFF000000;
ipa_low_lat_ep_cfg->client = IPA_CLIENT_APPS_WAN_LOW_LAT_CONS;
ipa_low_lat_ep_cfg->notify = apps_rmnet_ctl_receive_notify;
ipa_low_lat_ep_cfg->priv = NULL;
ipa_low_lat_ep_cfg->desc_fifo_sz =
IPA_WWAN_CONS_DESC_FIFO_SZ * IPA_FIFO_ELEMENT_SIZE;
ret = ipa_setup_sys_pipe(
&rmnet_ctl_ipa3_ctx->ipa_to_apps_low_lat_ep_cfg,
&rmnet_ctl_ipa3_ctx->ipa3_to_apps_low_lat_hdl);
if (ret) {
IPADBG("Low lat pipe setup fails\n");
return ret;
}
if (rmnet_ctl_ipa3_ctx->cb_info.ready_cb) {
(*(rmnet_ctl_ipa3_ctx->cb_info.ready_cb))
(rmnet_ctl_ipa3_ctx->cb_info.ready_cb_user_data);
} else {
IPAERR("invalid ready_cb\n");
return -EFAULT;
}
mutex_lock(&rmnet_ctl_ipa3_ctx->lock);
if (rmnet_ctl_ipa3_ctx->state == IPA_RMNET_CTL_NOT_REG)
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_PIPE_READY;
else
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_START;
mutex_unlock(&rmnet_ctl_ipa3_ctx->lock);
return 0;
}
int ipa3_setup_apps_low_lat_prod_pipe(void)
{
struct ipa_sys_connect_params *ipa_low_lat_ep_cfg;
int ret = 0;
int ep_idx;
if (!ipa3_ctx->rmnet_ctl_enable) {
IPAERR("Low lat pipe is disabled");
return -ENXIO;
}
ep_idx = ipa_get_ep_mapping(
IPA_CLIENT_APPS_WAN_LOW_LAT_PROD);
if (ep_idx == IPA_EP_NOT_ALLOCATED) {
IPAERR("low lat pipe not supported\n");
return -EFAULT;
}
ipa_low_lat_ep_cfg =
&rmnet_ctl_ipa3_ctx->apps_to_ipa_low_lat_ep_cfg;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 8;
ipa_low_lat_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
IPA_ENABLE_CS_OFFLOAD_UL;
ipa_low_lat_ep_cfg->ipa_ep_cfg.cfg.cs_metadata_hdr_offset
= 1;
ipa_low_lat_ep_cfg->ipa_ep_cfg.aggr.aggr_en =
IPA_BYPASS_AGGR;
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
/* modem wants the offset at 0 */
ipa_low_lat_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
ipa_low_lat_ep_cfg->ipa_ep_cfg.mode.dst =
IPA_CLIENT_Q6_WAN_CONS;
ipa_low_lat_ep_cfg->ipa_ep_cfg.mode.mode =
IPA_DMA;
ipa_low_lat_ep_cfg->client =
IPA_CLIENT_APPS_WAN_LOW_LAT_PROD;
ipa_low_lat_ep_cfg->notify =
apps_rmnet_ctl_tx_complete_notify;
ipa_low_lat_ep_cfg->desc_fifo_sz =
IPA_SYS_TX_DATA_DESC_FIFO_SZ;
ipa_low_lat_ep_cfg->priv = NULL;
ret = ipa_setup_sys_pipe(ipa_low_lat_ep_cfg,
&rmnet_ctl_ipa3_ctx->apps_to_ipa3_low_lat_hdl);
if (ret) {
IPAERR("failed to config apps low lat prod pipe\n");
return ret;
}
return 0;
}
int ipa3_teardown_apps_low_lat_pipes(void)
{
int ret = 0;
if (rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_PIPE_READY &&
rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_START) {
IPAERR("rmnet_ctl in bad state %d\n",
rmnet_ctl_ipa3_ctx->state);
return -EFAULT;
}
if (rmnet_ctl_ipa3_ctx->cb_info.stop_cb) {
(*(rmnet_ctl_ipa3_ctx->cb_info.stop_cb))
(rmnet_ctl_ipa3_ctx->cb_info.stop_cb_user_data);
} else {
IPAERR("Invalid stop_cb\n");
return -EFAULT;
}
if (rmnet_ctl_ipa3_ctx->state == IPA_RMNET_CTL_PIPE_READY)
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_NOT_REG;
else
rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_REGD;
ret = ipa3_teardown_sys_pipe(
rmnet_ctl_ipa3_ctx->ipa3_to_apps_low_lat_hdl);
if (ret < 0) {
IPAERR("Failed to teardown APPS->IPA low lat pipe\n");
return ret;
}
rmnet_ctl_ipa3_ctx->ipa3_to_apps_low_lat_hdl = -1;
ret = ipa3_teardown_sys_pipe(
rmnet_ctl_ipa3_ctx->apps_to_ipa3_low_lat_hdl);
if (ret < 0) {
return ret;
IPAERR("Failed to teardown APPS->IPA low lat pipe\n");
}
rmnet_ctl_ipa3_ctx->apps_to_ipa3_low_lat_hdl = -1;
return ret;
}
void ipa3_rmnet_ctl_ready_notifier(void)
{
if (rmnet_ctl_ipa3_ctx->cb_info.ready_cb) {
(*(rmnet_ctl_ipa3_ctx->cb_info.ready_cb))
(rmnet_ctl_ipa3_ctx->cb_info.ready_cb_user_data);
} else
IPAERR("invalid ready_cb\n");
IPADBG("low lat pipes are ready\n");
}
int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
{
int ret;
unsigned long flags;
if (!ipa3_ctx->rmnet_ctl_enable) {
IPAERR("low lat pipe not supported\n");
kfree_skb(skb);
return 0;
}
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
/* we cannot infinitely queue the packet */
if (skb_queue_len(&rmnet_ctl_ipa3_ctx->tx_queue) >=
RMNET_CTRL_QUEUE_MAX) {
IPAERR("rmnet_ctl tx queue full\n");
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
return -EAGAIN;
}
if (rmnet_ctl_ipa3_ctx->state != IPA_RMNET_CTL_START) {
IPAERR("bad rmnet_ctl state %d\n",
rmnet_ctl_ipa3_ctx->state);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
return 0;
}
/* if the queue is not empty, a wakeup work item is still pending */
if (skb_queue_len(&rmnet_ctl_ipa3_ctx->tx_queue) != 0) {
skb_queue_tail(&rmnet_ctl_ipa3_ctx->tx_queue, skb);
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
return 0;
}
/* rmnet_ctl is calling from atomic context */
ret = ipa_pm_activate(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
if (ret == -EINPROGRESS) {
skb_queue_tail(&rmnet_ctl_ipa3_ctx->tx_queue, skb);
/*
* delayed work is required here since we need to
* reschedule in the same workqueue context on error
*/
queue_delayed_work(rmnet_ctl_ipa3_ctx->wq,
&rmnet_ctl_wakeup_work, 0);
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
return 0;
} else if (ret) {
IPAERR("[%s] fatal: ipa pm activate failed %d\n",
__func__, ret);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
return 0;
}
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
/*
* both data packets and command will be routed to
* IPA_CLIENT_Q6_WAN_CONS based on DMA settings
*/
ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
if (ret) {
if (ret == -EPIPE) {
IPAERR("Low lat fatal: pipe is not valid\n");
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
return 0;
}
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
skb_queue_head(&rmnet_ctl_ipa3_ctx->tx_queue, skb);
ret = 0;
goto out;
}
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
atomic_inc(&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_sent++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_sent +=
skb->len;
ret = 0;
out:
if (atomic_read(
&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts)
== 0)
ipa_pm_deferred_deactivate(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
return ret;
}
static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
{
int ret;
unsigned long flags;
struct sk_buff *skb;
/* calling from WQ */
ret = ipa_pm_activate_sync(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
if (ret) {
IPAERR("[%s] fatal: ipa pm activate failed %d\n",
__func__, ret);
queue_delayed_work(rmnet_ctl_ipa3_ctx->wq,
&rmnet_ctl_wakeup_work,
msecs_to_jiffies(1));
return;
}
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
/* dequeue the skb */
while (skb_queue_len(&rmnet_ctl_ipa3_ctx->tx_queue) > 0) {
skb = skb_dequeue(&rmnet_ctl_ipa3_ctx->tx_queue);
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
/*
* both data packets and command will be routed to
* IPA_CLIENT_Q6_WAN_CONS based on DMA settings
*/
ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
if (ret) {
if (ret == -EPIPE) {
/* try to drain skb from queue if pipe teardown */
IPAERR_RL("Low lat fatal: pipe is not valid\n");
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
continue;
}
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
skb_queue_head(&rmnet_ctl_ipa3_ctx->tx_queue, skb);
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
goto delayed_work;
}
atomic_inc(&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts);
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_sent++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_sent +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
}
goto out;
delayed_work:
queue_delayed_work(rmnet_ctl_ipa3_ctx->wq,
&rmnet_ctl_wakeup_work,
msecs_to_jiffies(1));
out:
if (atomic_read(
&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts)
== 0) {
ipa_pm_deferred_deactivate(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
}
}
/**
* apps_rmnet_ctl_tx_complete_notify() - Tx completion notify
*
* @priv: driver context
* @evt: event type
* @data: data provided with event
*
* Check that the packet is the one we sent and release it
* This function will be called in deferred context in the IPA wq.
*/
static void apps_rmnet_ctl_tx_complete_notify(void *priv,
enum ipa_dp_evt_type evt, unsigned long data)
{
struct sk_buff *skb = (struct sk_buff *)data;
unsigned long flags;
if (evt != IPA_WRITE_DONE) {
IPAERR("unsupported evt on Tx callback, Drop the packet\n");
spin_lock_irqsave(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
rmnet_ctl_ipa3_ctx->stats.tx_pkt_dropped++;
rmnet_ctl_ipa3_ctx->stats.tx_byte_dropped +=
skb->len;
spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock,
flags);
kfree_skb(skb);
return;
}
atomic_dec(&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts);
if (atomic_read(
&rmnet_ctl_ipa3_ctx->stats.outstanding_pkts) == 0)
ipa_pm_deferred_deactivate(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
kfree_skb(skb);
}
/**
* apps_rmnet_ctl_receive_notify() - Rmnet_ctl RX notify
*
* @priv: driver context
* @evt: event type
* @data: data provided with event
*
* IPA will pass a packet to the Linux network stack with skb->data
*/
static void apps_rmnet_ctl_receive_notify(void *priv,
enum ipa_dp_evt_type evt, unsigned long data)
{
void *rx_notify_cb_rx_data;
struct sk_buff *low_lat_data;
int len;
low_lat_data = (struct sk_buff *)data;
if (low_lat_data == NULL) {
IPAERR("Rx packet is invalid");
return;
}
len = low_lat_data->len;
if (evt == IPA_RECEIVE) {
IPADBG_LOW("Rx packet was received");
rx_notify_cb_rx_data = (void *)data;
if (rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb) {
(*(rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb))(
rmnet_ctl_ipa3_ctx->cb_info.rx_notify_cb_user_data,
rx_notify_cb_rx_data);
} else
goto fail;
rmnet_ctl_ipa3_ctx->stats.rx_pkt_rcvd++;
rmnet_ctl_ipa3_ctx->stats.rx_byte_rcvd +=
len;
} else {
IPAERR("Invalid evt %d received in rmnet_ctl\n", evt);
goto fail;
}
return;
fail:
kfree_skb(low_lat_data);
rmnet_ctl_ipa3_ctx->stats.rx_pkt_dropped++;
}
static int ipa3_rmnet_ctl_register_pm_client(void)
{
int result;
struct ipa_pm_register_params pm_reg;
memset(&pm_reg, 0, sizeof(pm_reg));
pm_reg.name = "rmnet_ctl";
pm_reg.group = IPA_PM_GROUP_APPS;
result = ipa_pm_register(&pm_reg, &rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
if (result) {
IPAERR("failed to create IPA PM client %d\n", result);
return result;
}
IPAERR("%s register done\n", pm_reg.name);
return 0;
}
static void ipa3_rmnet_ctl_deregister_pm_client(void)
{
ipa_pm_deactivate_sync(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
ipa_pm_deregister(rmnet_ctl_ipa3_ctx->rmnet_ctl_pm_hdl);
}


@@ -51,6 +51,13 @@
#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
#define IPA_UPSTEAM_WLAN1_IFACE_NAME "wlan1"
enum ipa_ap_ingress_ep_enum {
IPA_AP_INGRESS_NONE = 0,
IPA_AP_INGRESS_EP_DEFAULT = 1 << 0,
IPA_AP_INGRESS_EP_COALS = 1 << 1,
IPA_AP_INGRESS_EP_LOW_LAT = 1 << 2,
};
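/*
 * ingress_eps_mask built from these bits is passed to
 * ipa_send_wan_pipe_ind_to_modem(), which tells the modem through the
 * QMI endpoint descriptor indication which AP ingress pipes are connected.
 */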
#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
#define INVALID_MUX_ID 0xFF
@@ -1395,7 +1402,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
if (result) {
pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
__func__, __LINE__);
__func__, __LINE__);
dev->stats.rx_dropped++;
}
dev->stats.rx_packets++;
@@ -1406,23 +1413,52 @@ static void apps_ipa_packet_receive_notify(void *priv,
}
/* Send WAN ingress pipe (RSC/low latency) endpoint info to the modem using a QMI indication message */
static int ipa_send_rsc_pipe_ind_to_modem(void)
static int ipa_send_wan_pipe_ind_to_modem(int ingress_eps_mask)
{
struct ipa_endp_desc_indication_msg_v01 req;
struct ipa_ep_id_type_v01 *ep_info;
if (ingress_eps_mask == IPA_AP_INGRESS_NONE)
return 0;
memset(&req, 0, sizeof(struct ipa_endp_desc_indication_msg_v01));
req.ep_info_len = 1;
req.ep_info_valid = true;
req.num_eps_valid = true;
req.num_eps = 1;
ep_info = &req.ep_info[req.ep_info_len - 1];
ep_info->ep_id = rmnet_ipa3_ctx->ipa3_to_apps_hdl;
ep_info->ic_type = DATA_IC_TYPE_AP_V01;
ep_info->ep_type = DATA_EP_DESC_TYPE_RSC_PROD_V01;
ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
return ipa3_qmi_send_rsc_pipe_indication(&req);
if (ingress_eps_mask & IPA_AP_INGRESS_EP_COALS) {
req.ep_info_len++;
req.ep_info_valid = true;
req.num_eps_valid = true;
req.num_eps++;
ep_info = &req.ep_info[req.ep_info_len - 1];
ep_info->ep_id = rmnet_ipa3_ctx->ipa3_to_apps_hdl;
ep_info->ic_type = DATA_IC_TYPE_AP_V01;
ep_info->ep_type = DATA_EP_DESC_TYPE_RSC_PROD_V01;
ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
}
if (ingress_eps_mask & IPA_AP_INGRESS_EP_LOW_LAT) {
req.ep_info_len++;
req.ep_info_valid = true;
req.num_eps_valid = true;
req.num_eps++;
ep_info = &req.ep_info[req.ep_info_len - 1];
ep_info->ep_id = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_WAN_LOW_LAT_CONS);
ep_info->ic_type = DATA_IC_TYPE_AP_V01;
ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_PROD_V01;
ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
req.ep_info_len++;
req.num_eps++;
ep_info = &req.ep_info[req.ep_info_len - 1];
ep_info->ep_id = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_WAN_LOW_LAT_PROD);
ep_info->ic_type = DATA_IC_TYPE_AP_V01;
ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_CONS_V01;
ep_info->ep_status = DATA_EP_STATUS_CONNECTED_V01;
}
if (req.num_eps > 0)
return ipa3_qmi_send_wan_pipe_indication(&req);
else
return 0;
}
static int handle3_ingress_format(struct net_device *dev,
@@ -1431,6 +1467,7 @@ static int handle3_ingress_format(struct net_device *dev,
int ret = 0;
struct ipa_sys_connect_params *ipa_wan_ep_cfg;
int ep_idx;
int ingress_eps_mask = IPA_AP_INGRESS_NONE;
IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
@@ -1490,10 +1527,13 @@ static int handle3_ingress_format(struct net_device *dev,
ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS;
ingress_eps_mask |= IPA_AP_INGRESS_EP_DEFAULT;
if (dev->features & NETIF_F_GRO_HW)
if (dev->features & NETIF_F_GRO_HW) {
/* Setup coalescing pipes */
ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_COAL_CONS;
ingress_eps_mask |= IPA_AP_INGRESS_EP_COALS;
}
ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify;
ipa_wan_ep_cfg->priv = dev;
@@ -1511,12 +1551,24 @@ static int handle3_ingress_format(struct net_device *dev,
return -EFAULT;
}
ret = ipa_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
&rmnet_ipa3_ctx->ipa3_to_apps_hdl);
&rmnet_ipa3_ctx->ipa3_to_apps_hdl);
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
if (ret)
if (ret) {
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
goto end;
}
IPAWANDBG("ingress WAN pipe setup successfully\n");
ret = ipa3_setup_apps_low_lat_cons_pipe();
if (ret)
goto low_lat_fail;
ingress_eps_mask |= IPA_AP_INGRESS_EP_LOW_LAT;
IPAWANDBG("ingress low latency pipe setup successfully\n");
low_lat_fail:
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
/* construct default WAN RT tbl for IPACM */
ret = ipa3_setup_a7_qmap_hdr();
if (ret)
@@ -1526,9 +1578,11 @@ static int handle3_ingress_format(struct net_device *dev,
if (ret)
ipa3_del_a7_qmap_hdr();
/* Sending QMI indication message share RSC pipe details*/
if (dev->features & NETIF_F_GRO_HW)
ipa_send_rsc_pipe_ind_to_modem();
/* notify rmnet_ctl that the pipes are ready to use */
ipa3_rmnet_ctl_ready_notifier();
/* Send QMI indication message to share RSC/QMAP pipe details */
ipa_send_wan_pipe_ind_to_modem(ingress_eps_mask);
end:
if (ret)
IPAWANERR("failed to configure ingress\n");
@@ -1630,12 +1684,21 @@ static int handle3_egress_format(struct net_device *dev,
rc = ipa_setup_sys_pipe(
ipa_wan_ep_cfg, &rmnet_ipa3_ctx->apps_to_ipa3_hdl);
if (rc) {
IPAWANERR("failed to config egress endpoint\n");
IPAWANERR("failed to setup egress endpoint\n");
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
return rc;
}
IPAWANDBG("engress WAN pipe setup successfully\n");
rc = ipa3_setup_apps_low_lat_prod_pipe();
if (rc) {
IPAWANERR("failed to setup egress low lat endpoint\n");
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
goto low_lat_fail;
}
IPAWANDBG("engress low lat pipe setup successfully\n");
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
low_lat_fail:
if (rmnet_ipa3_ctx->num_q6_rules != 0) {
/* already got Q6 UL filter rules*/
if (!ipa3_qmi_ctx->modem_cfg_emb_pipe_flt) {
@@ -1997,6 +2060,18 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ext_ioctl_data.u.offload_params.mux_id,
tcp_en || udp_en, tcp_en, udp_en);
break;
/* vote ipa clock on/off */
case RMNET_IOCTL_SET_SLEEP_STATE:
if (ext_ioctl_data.u.data) {
/* Request to enable LPM */
IPAWANDBG("ioctl: unvote IPA clock\n");
IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NETMGR");
} else {
/* Request to disable LPM */
IPAWANDBG("ioctl: vote IPA clock\n");
IPA_ACTIVE_CLIENTS_INC_SPECIAL("NETMGR");
}
break;
default:
IPAWANERR("[%s] unsupported extended cmd[%d]",
dev->name,
@@ -2313,9 +2388,12 @@ static int ipa3_wwan_register_netdev_pm_client(struct net_device *dev)
pm_reg.group = IPA_PM_GROUP_APPS;
result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->pm_hdl);
if (result) {
IPAERR("failed to create IPA PM client %d\n", result);
return result;
IPAWANERR("failed to create IPA PM client %d\n", result);
return result;
}
IPAWANERR("%s register done\n", pm_reg.name);
return 0;
}
@@ -2519,6 +2597,9 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
IPAWANINFO("rmnet_ipa started deinitialization\n");
mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);
ret = ipa3_teardown_apps_low_lat_pipes();
if (ret < 0)
IPAWANERR("Failed to teardown IPA->APPS qmap pipe\n");
ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
if (ret < 0)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");