rmnet_core: support low latency channel switch

Add support to switch bearers between default and lower latency
channels via QMAP commands.

Change-Id: I6662f59c713e8e3ab7409f50871bec11d9908c67
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Author: Subash Abhinov Kasiviswanathan
Date: 2020-08-27 19:47:17 -07:00
parent b38dff7d79
commit b8552944d5
15 changed files with 873 additions and 216 deletions


@@ -314,6 +314,8 @@ static void qmi_rmnet_bearer_clean(struct qos_info *qos)
     if (qos->removed_bearer) {
         qos->removed_bearer->watchdog_quit = true;
         del_timer_sync(&qos->removed_bearer->watchdog);
+        qos->removed_bearer->ch_switch.timer_quit = true;
+        del_timer_sync(&qos->removed_bearer->ch_switch.guard_timer);
         kfree(qos->removed_bearer);
         qos->removed_bearer = NULL;
     }
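
The two added lines follow the same teardown discipline as the existing watchdog timer: set a quit flag so a concurrently firing callback becomes a no-op, call del_timer_sync() to wait out any handler that is already running, and only then free the bearer. A minimal sketch of that pattern, with hypothetical struct and function names (not from the driver):

    #include <linux/timer.h>
    #include <linux/slab.h>

    /* Illustrative only; these names are not from the driver. */
    struct guarded_obj {
        struct timer_list guard_timer;
        bool timer_quit;
    };

    static void guard_fn(struct timer_list *t)
    {
        struct guarded_obj *obj = from_timer(obj, t, guard_timer);

        if (obj->timer_quit)    /* teardown in progress: bail out */
            return;
        /* ... normal expiry handling ... */
    }

    static void guarded_obj_free(struct guarded_obj *obj)
    {
        obj->timer_quit = true;            /* neutralize the callback */
        del_timer_sync(&obj->guard_timer); /* wait for a running handler */
        kfree(obj);                        /* only now is the free safe */
    }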
@@ -340,6 +342,8 @@ static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
         bearer->ack_mq_idx = INVALID_MQ;
         bearer->qos = qos_info;
         timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
+        timer_setup(&bearer->ch_switch.guard_timer,
+                    rmnet_ll_guard_fn, 0);
         list_add(&bearer->list, &qos_info->bearer_head);
     }
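
timer_setup() only initializes the guard timer here; arming happens later, presumably when a channel-switch request is sent, so the driver does not wait indefinitely for the modem's response. A hedged sketch of that arming step, reusing the fields visible in this diff (the helper name and timeout value are assumptions, not the driver's):

    /* Hypothetical helper; 5000 ms is an assumed bound. */
    #define CH_SWITCH_GUARD_MS 5000

    static void start_switch_guard(struct rmnet_bearer_map *bearer)
    {
        bearer->ch_switch.timer_quit = false;
        mod_timer(&bearer->ch_switch.guard_timer,
                  jiffies + msecs_to_jiffies(CH_SWITCH_GUARD_MS));
    }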
@@ -361,6 +365,7 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
             continue;
 
         mq->bearer = NULL;
+        mq->is_ll_ch = false;
         if (reset) {
             qmi_rmnet_reset_txq(dev, i);
             qmi_rmnet_flow_control(dev, i, 1);
@@ -393,6 +398,7 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
     mq = &qos_info->mq[itm->mq_idx];
     if (!mq->bearer) {
         mq->bearer = bearer;
+        mq->is_ll_ch = bearer->ch_switch.current_ch;
         if (dfc_mode == DFC_MODE_SA) {
             bearer->mq_idx = itm->mq_idx;
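
Together with the previous hunk, this keeps a per-TX-queue snapshot of the owning bearer's channel: the flag is set from ch_switch.current_ch when a bearer is bound to an MQ slot and cleared when it is unbound. A simplified sketch of the relationship (not the driver's actual definition):

    /* Simplified view of one MQ slot as implied by these hunks. */
    struct mq_slot {
        struct rmnet_bearer_map *bearer; /* owner; NULL when the slot is free */
        bool is_ll_ch;                   /* cached: bearer on the LL channel? */
    };

Caching the flag per queue is what lets the transmit-path check further down drop its flow-map lookup and lock.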
@@ -705,24 +711,25 @@ qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
     __qmi_rmnet_delete_client(port, qmi, idx);
 }
 
-void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
-                           int attr_len)
+int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+                          int attr_len)
 {
     struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
     struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
     void *wda_data = NULL;
+    int rc = 0;
 
     switch (tcm->tcm_family) {
     case NLMSG_FLOW_ACTIVATE:
         if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
             !qmi_rmnet_has_dfc_client(qmi))
-            return;
+            return rc;
 
         qmi_rmnet_add_flow(dev, tcm, qmi);
         break;
     case NLMSG_FLOW_DEACTIVATE:
         if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
-            return;
+            return rc;
 
         qmi_rmnet_del_flow(dev, tcm, qmi);
         break;
@@ -732,7 +739,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
         if (!DFC_SUPPORTED_MODE(dfc_mode) &&
             !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
-            return;
+            return rc;
 
         if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
             /* retrieve qmi again as it could have been changed */
@@ -744,7 +751,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
                 kfree(qmi);
             }
 
-            return;
+            return rc;
         }
 
         if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
@@ -754,7 +761,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
         break;
     case NLMSG_CLIENT_DELETE:
         if (!qmi)
-            return;
+            return rc;
 
         if (tcm->tcm_handle == 0) { /* instance 0 */
             rmnet_clear_powersave_format(port);
             if (qmi->wda_client)
@@ -768,7 +775,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
         break;
     case NLMSG_SCALE_FACTOR:
         if (!tcm->tcm_ifindex)
-            return;
+            return rc;
 
         qmi_rmnet_scale_factor = tcm->tcm_ifindex;
         break;
     case NLMSG_WQ_FREQUENCY:
@@ -777,12 +784,16 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
     case NLMSG_CHANNEL_SWITCH:
         if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
             !qmi_rmnet_has_dfc_client(qmi))
-            return;
+            return rc;
 
+        rc = rmnet_ll_switch(dev, tcm, attr_len);
         break;
     default:
         pr_debug("%s(): No handler\n", __func__);
         break;
     }
+
+    return rc;
 }
 EXPORT_SYMBOL(qmi_rmnet_change_link);
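
The signature change from void to int lets this netlink entry point surface the result of rmnet_ll_switch(); every early exit now returns rc, which stays 0 on the paths that previously returned nothing. A hedged sketch of how a caller might consume the new return value (the wrapper name is hypothetical):

    /* Hypothetical caller: propagate the channel-switch status. */
    static int handle_change_link(struct net_device *dev, void *port,
                                  void *tcm, int attr_len)
    {
        int rc = qmi_rmnet_change_link(dev, port, tcm, attr_len);

        if (rc < 0)
            pr_err("rmnet: channel switch request failed: %d\n", rc);

        return rc;
    }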
@@ -880,32 +891,19 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
 EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
 
 #ifdef CONFIG_QTI_QMI_DFC
-bool qmi_rmnet_flow_is_low_latency(struct net_device *dev, int ip_type,
-                                   u32 mark)
+bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
+                                   struct sk_buff *skb)
 {
     struct qos_info *qos = rmnet_get_qos_pt(dev);
-    struct rmnet_bearer_map *bearer;
-    struct rmnet_flow_map *itm;
-    bool ret = false;
+    int txq = skb->queue_mapping;
 
-    if (!qos)
-        goto out;
+    if (txq > ACK_MQ_OFFSET)
+        txq -= ACK_MQ_OFFSET;
 
-    spin_lock_bh(&qos->qos_lock);
-    itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
-    if (!itm)
-        goto out_unlock;
+    if (unlikely(!qos || txq >= MAX_MQ_NUM))
+        return false;
 
-    bearer = itm->bearer;
-    if (!bearer)
-        goto out_unlock;
-
-    ret = bearer->is_low_latency;
-
-out_unlock:
-    spin_unlock_bh(&qos->qos_lock);
-out:
-    return ret;
+    return qos->mq[txq].is_ll_ch;
 }
 EXPORT_SYMBOL(qmi_rmnet_flow_is_low_latency);
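
This rewrite replaces a per-packet flow-map hash lookup under qos_lock with a single lockless array read: the skb's queue_mapping already identifies the MQ slot (ACK queues sit at the data-queue index plus ACK_MQ_OFFSET and are folded back), and the cached is_ll_ch flag answers directly. A commented restatement of that fast path:

    /* Mirrors the rewritten function: fold ACK queues onto their data
     * queue, bounds-check, then read the cached per-queue flag. */
    static bool is_ll_queue(struct qos_info *qos, struct sk_buff *skb)
    {
        int txq = skb->queue_mapping;

        if (txq > ACK_MQ_OFFSET)        /* ACK MQ shadows a data MQ */
            txq -= ACK_MQ_OFFSET;

        if (unlikely(!qos || txq >= MAX_MQ_NUM))
            return false;               /* no QoS state: default channel */

        return qos->mq[txq].is_ll_ch;   /* one lockless read */
    }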
@@ -1059,6 +1057,8 @@ void qmi_rmnet_qos_exit_pre(void *qos)
     list_for_each_entry(bearer, &qosi->bearer_head, list) {
         bearer->watchdog_quit = true;
         del_timer_sync(&bearer->watchdog);
+        bearer->ch_switch.timer_quit = true;
+        del_timer_sync(&bearer->ch_switch.guard_timer);
     }
 
     list_add(&qosi->list, &qos_cleanup_list);