rmnet_core: support low latency channel switch

Add support to switch bearers between default and lower latency
channels via QMAP commands.

Change-Id: I6662f59c713e8e3ab7409f50871bec11d9908c67
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
This commit is contained in:
Subash Abhinov Kasiviswanathan
2020-08-27 19:47:17 -07:00
parent b38dff7d79
commit b8552944d5
15 changed files with 873 additions and 216 deletions
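At a high level, the switch is a three-message QMAP exchange between the driver and the modem, triggered from userspace over rtnetlink. The sketch below is reconstructed from the handlers added in rmnet_ll_qmap.c further down; it is illustrative, not part of the commit:

/*
 * userspace                rmnet driver                       modem
 *     | RTM_NEWLINK w/ tcmsg  |                                 |
 *     |---------------------->| QMAP_LL_SWITCH (REQUEST)        |
 *     |                       |-------------------------------->|
 *     |                       | QMAP_LL_SWITCH (ACK)            |
 *     |                       |<--------------------------------|
 *     |                       | QMAP_LL_SWITCH_STATUS (REQUEST) |
 *     |                       |<--------------------------------|
 *     | netlink ack           | QMAP_LL_SWITCH_STATUS (ACK)     |
 *     | (final status)        |-------------------------------->|
 *     |<----------------------|                                 |
 */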


@@ -26,7 +26,9 @@ rmnet_core-y += \
qmi_rmnet.o \
wda_qmi.o \
dfc_qmi.o \
-dfc_qmap.o
+dfc_qmap.o \
+rmnet_qmap.o \
+rmnet_ll_qmap.o
rmnet_ctl-y := \
rmnet_ctl_client.o \


@@ -321,6 +321,34 @@ TRACE_EVENT(dfc_watchdog,
__entry->mux_id, __entry->bearer_id, __entry->event)
);
TRACE_EVENT(dfc_ll_switch,
TP_PROTO(const char *cmd, u8 type, u8 num_bearer, void *bearers),
TP_ARGS(cmd, type, num_bearer, bearers),
TP_STRUCT__entry(
__string(cmd_str, cmd)
__field(u8, type)
__field(u8, num_bearer)
__dynamic_array(u8, bearers, num_bearer)
),
TP_fast_assign(
__assign_str(cmd_str, cmd)
__entry->type = type;
__entry->num_bearer = num_bearer;
memcpy(__get_dynamic_array(bearers), bearers, num_bearer);
),
TP_printk("%s type=%u num_bearer=%u bearers={%s}",
__get_str(cmd_str),
__entry->type,
__entry->num_bearer,
__print_array(__get_dynamic_array(bearers),
__entry->num_bearer, 1))
);
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */
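Rendered with the TP_printk format above, a one-bearer switch request would trace roughly as follows (hypothetical values; the exact bearer-array formatting comes from __print_array):

dfc_ll_switch: REQ type=1 num_bearer=1 bearers={0x5}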


@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
-* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
*/
#include <net/pkt_sched.h>
#include <linux/module.h>
#include "rmnet_ctl.h"
#include "rmnet_qmap.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
@@ -13,39 +13,6 @@
#define QMAP_DFC_VER 1
-#define QMAP_CMD_DONE -1
-#define QMAP_CMD_REQUEST 0
-#define QMAP_CMD_ACK 1
-#define QMAP_CMD_UNSUPPORTED 2
-#define QMAP_CMD_INVALID 3
-#define QMAP_DFC_CONFIG 10
-#define QMAP_DFC_IND 11
-#define QMAP_DFC_QUERY 12
-#define QMAP_DFC_END_MARKER 13
-struct qmap_hdr {
-u8 cd_pad;
-u8 mux_id;
-__be16 pkt_len;
-} __aligned(1);
-#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
-struct qmap_cmd_hdr {
-u8 pad_len:6;
-u8 reserved_bit:1;
-u8 cd_bit:1;
-u8 mux_id;
-__be16 pkt_len;
-u8 cmd_name;
-u8 cmd_type:2;
-u8 reserved:6;
-u16 reserved2;
-__be32 tx_id;
-} __aligned(1);
struct qmap_dfc_config {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
@@ -125,43 +92,12 @@ struct qmap_dfc_end_marker_cnf {
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
-static atomic_t qmap_txid;
-static void *rmnet_ctl_handle;
static bool dfc_config_acked;
-static struct rmnet_ctl_client_if *rmnet_ctl;
static void dfc_qmap_send_config(struct dfc_qmi_data *data);
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
-u8 bearer_id, u16 seq, u32 tx_id);
-static void dfc_qmap_send_cmd(struct sk_buff *skb)
-{
-trace_dfc_qmap(skb->data, skb->len, false);
-if (unlikely(!rmnet_ctl || !rmnet_ctl->send) ||
-rmnet_ctl->send(rmnet_ctl_handle, skb)) {
-pr_err("Failed to send to rmnet ctl\n");
-kfree_skb(skb);
-}
-}
-static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
-struct sk_buff *skb)
-{
-struct qmap_cmd_hdr *cmd;
-cmd = (struct qmap_cmd_hdr *)skb->data;
-skb->protocol = htons(ETH_P_MAP);
-skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
-if (likely(rmnet_ctl && rmnet_ctl->log))
-rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0,
-skb->data, skb->len);
-trace_dfc_qmap(skb->data, skb->len, false);
-dev_queue_xmit(skb);
-}
+struct rmnet_bearer_map *bearer,
+u16 seq, u32 tx_id);
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
struct sk_buff *skb)
@@ -254,33 +190,40 @@ static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
return QMAP_CMD_DONE;
}
-static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
-u8 bearer_id, u16 seq_num, u32 tx_id)
+static int dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
+u8 bearer_id, u16 seq_num, u32 tx_id)
{
struct net_device *dev;
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
+int rc = QMAP_CMD_ACK;
dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
if (!dev)
-return;
+return rc;
qos = (struct qos_info *)rmnet_get_qos_pt(dev);
if (!qos)
-return;
+return rc;
spin_lock_bh(&qos->qos_lock);
bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+if (!bearer) {
+spin_unlock_bh(&qos->qos_lock);
+return rc;
+}
-if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
+if (bearer->last_seq == seq_num && bearer->grant_size) {
bearer->ack_req = 1;
bearer->ack_txid = tx_id;
} else {
-dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
+dfc_qmap_send_end_marker_cnf(qos, bearer, seq_num, tx_id);
}
spin_unlock_bh(&qos->qos_lock);
+return QMAP_CMD_DONE;
}
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
@@ -293,47 +236,32 @@ static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
cmd = (struct qmap_dfc_end_marker_req *)skb->data;
-dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
-ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));
-return QMAP_CMD_DONE;
+return dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
+ntohs(cmd->seq_num),
+ntohl(cmd->hdr.tx_id));
}
-static void dfc_qmap_cmd_handler(struct sk_buff *skb)
+int dfc_qmap_cmd_handler(struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
struct dfc_qmi_data *dfc;
int rc = QMAP_CMD_DONE;
-if (!skb)
-return;
-trace_dfc_qmap(skb->data, skb->len, true);
-if (skb->len < sizeof(struct qmap_cmd_hdr))
-goto free_skb;
cmd = (struct qmap_cmd_hdr *)skb->data;
-if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
-goto free_skb;
if (cmd->cmd_name == QMAP_DFC_QUERY) {
if (cmd->cmd_type != QMAP_CMD_ACK)
-goto free_skb;
+return rc;
} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
if (cmd->cmd_type == QMAP_CMD_ACK &&
cmd->cmd_name == QMAP_DFC_CONFIG)
dfc_config_acked = true;
-goto free_skb;
+return rc;
}
-rcu_read_lock();
dfc = rcu_dereference(qmap_dfc_data);
-if (!dfc || READ_ONCE(dfc->restart_state)) {
-rcu_read_unlock();
-goto free_skb;
-}
+if (!dfc || READ_ONCE(dfc->restart_state))
+return rc;
/* Re-send DFC config once if needed */
if (unlikely(!dfc_config_acked)) {
@@ -356,25 +284,11 @@ static void dfc_qmap_cmd_handler(struct sk_buff *skb)
break;
default:
-rc = QMAP_CMD_UNSUPPORTED;
+if (cmd->cmd_type == QMAP_CMD_REQUEST)
+rc = QMAP_CMD_UNSUPPORTED;
}
-/* Send ack */
-if (rc != QMAP_CMD_DONE) {
-cmd->cmd_type = rc;
-if (cmd->cmd_name == QMAP_DFC_IND)
-dfc_qmap_send_inband_ack(dfc, skb);
-else
-dfc_qmap_send_cmd(skb);
-rcu_read_unlock();
-return;
-}
-rcu_read_unlock();
-free_skb:
-kfree_skb(skb);
+return rc;
}
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
@@ -396,7 +310,7 @@ static void dfc_qmap_send_config(struct dfc_qmi_data *data)
dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
-dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+dfc_config->hdr.tx_id = htonl(rmnet_qmap_next_txid());
dfc_config->cmd_ver = QMAP_DFC_VER;
dfc_config->cmd_id = QMAP_DFC_IND;
@@ -404,7 +318,7 @@ static void dfc_qmap_send_config(struct dfc_qmi_data *data)
dfc_config->ep_type = htonl(data->svc.ep_type);
dfc_config->iface_id = htonl(data->svc.iface_id);
-dfc_qmap_send_cmd(skb);
+rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
@@ -426,16 +340,17 @@ static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
-dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+dfc_query->hdr.tx_id = htonl(rmnet_qmap_next_txid());
dfc_query->cmd_ver = QMAP_DFC_VER;
dfc_query->bearer_id = bearer_id;
-dfc_qmap_send_cmd(skb);
+rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
-u8 bearer_id, u16 seq, u32 tx_id)
+struct rmnet_bearer_map *bearer,
+u16 seq, u32 tx_id)
{
struct sk_buff *skb;
struct qmap_dfc_end_marker_cnf *em_cnf;
@@ -456,18 +371,17 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
em_cnf->hdr.tx_id = htonl(tx_id);
em_cnf->cmd_ver = QMAP_DFC_VER;
-em_cnf->bearer_id = bearer_id;
+em_cnf->bearer_id = bearer->bearer_id;
em_cnf->seq_num = htons(seq);
-skb->protocol = htons(ETH_P_MAP);
-skb->dev = qos->real_dev;
-/* This cmd needs to be sent in-band */
-if (likely(rmnet_ctl && rmnet_ctl->log))
-rmnet_ctl->log(RMNET_CTL_LOG_INFO, "TXI", 0,
-skb->data, skb->len);
-trace_dfc_qmap(skb->data, skb->len, false);
-rmnet_map_tx_qmap_cmd(skb);
+/* This cmd needs to be sent in-band after data on the current
+ * channel. But due to an IPA bug, it cannot be sent over LLC, so
+ * send it over the QMAP control channel if the current channel is LLC.
+ */
+if (bearer->ch_switch.current_ch == RMNET_CH_DEFAULT)
+rmnet_qmap_send(skb, bearer->ch_switch.current_ch, true);
+else
+rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
@@ -477,17 +391,13 @@ void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
if (type == DFC_ACK_TYPE_DISABLE) {
bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
if (bearer)
-dfc_qmap_send_end_marker_cnf(qos, bearer_id,
+dfc_qmap_send_end_marker_cnf(qos, bearer,
seq, bearer->ack_txid);
} else if (type == DFC_ACK_TYPE_THRESHOLD) {
dfc_qmap_send_query(qos->mux_id, bearer_id);
}
}
-static struct rmnet_ctl_client_hooks cb = {
-.ctl_dl_client_hook = dfc_qmap_cmd_handler,
-};
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi)
{
@@ -512,19 +422,7 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
qmi->dfc_clients[index] = (void *)data;
rcu_assign_pointer(qmap_dfc_data, data);
-atomic_set(&qmap_txid, 0);
-rmnet_ctl = rmnet_ctl_if();
-if (!rmnet_ctl) {
-pr_err("rmnet_ctl module not loaded\n");
-goto out;
-}
-if (rmnet_ctl->reg)
-rmnet_ctl_handle = rmnet_ctl->reg(&cb);
-if (!rmnet_ctl_handle)
-pr_err("Failed to register with rmnet ctl\n");
+rmnet_qmap_init(port);
trace_dfc_client_state_up(data->index, data->svc.instance,
data->svc.ep_type, data->svc.iface_id);
@@ -534,7 +432,6 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
dfc_config_acked = false;
dfc_qmap_send_config(data);
-out:
return 0;
}
@@ -549,16 +446,13 @@ void dfc_qmap_client_exit(void *dfc_data)
trace_dfc_client_state_down(data->index, 0);
-if (rmnet_ctl && rmnet_ctl->dereg)
-rmnet_ctl->dereg(rmnet_ctl_handle);
-rmnet_ctl_handle = NULL;
+rmnet_qmap_exit();
WRITE_ONCE(data->restart_state, 1);
RCU_INIT_POINTER(qmap_dfc_data, NULL);
synchronize_rcu();
kfree(data);
-rmnet_ctl = NULL;
pr_info("DFC QMAP exit\n");
}


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -912,7 +912,7 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
qos->tran_num++;
-rmnet_map_tx_qmap_cmd(skb);
+rmnet_map_tx_qmap_cmd(skb, RMNET_CH_DEFAULT, true);
}
int dfc_bearer_flow_ctl(struct net_device *dev,


@@ -314,6 +314,8 @@ static void qmi_rmnet_bearer_clean(struct qos_info *qos)
if (qos->removed_bearer) {
qos->removed_bearer->watchdog_quit = true;
del_timer_sync(&qos->removed_bearer->watchdog);
qos->removed_bearer->ch_switch.timer_quit = true;
del_timer_sync(&qos->removed_bearer->ch_switch.guard_timer);
kfree(qos->removed_bearer);
qos->removed_bearer = NULL;
}
@@ -340,6 +342,8 @@ static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
bearer->ack_mq_idx = INVALID_MQ;
bearer->qos = qos_info;
timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
timer_setup(&bearer->ch_switch.guard_timer,
rmnet_ll_guard_fn, 0);
list_add(&bearer->list, &qos_info->bearer_head);
}
@@ -361,6 +365,7 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
continue;
mq->bearer = NULL;
mq->is_ll_ch = false;
if (reset) {
qmi_rmnet_reset_txq(dev, i);
qmi_rmnet_flow_control(dev, i, 1);
@@ -393,6 +398,7 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
mq = &qos_info->mq[itm->mq_idx];
if (!mq->bearer) {
mq->bearer = bearer;
mq->is_ll_ch = bearer->ch_switch.current_ch;
if (dfc_mode == DFC_MODE_SA) {
bearer->mq_idx = itm->mq_idx;
@@ -705,24 +711,25 @@ qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
__qmi_rmnet_delete_client(port, qmi, idx);
}
-void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
-int attr_len)
+int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+int attr_len)
{
struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
void *wda_data = NULL;
int rc = 0;
switch (tcm->tcm_family) {
case NLMSG_FLOW_ACTIVATE:
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
!qmi_rmnet_has_dfc_client(qmi))
-return;
+return rc;
qmi_rmnet_add_flow(dev, tcm, qmi);
break;
case NLMSG_FLOW_DEACTIVATE:
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
-return;
+return rc;
qmi_rmnet_del_flow(dev, tcm, qmi);
break;
@@ -732,7 +739,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
if (!DFC_SUPPORTED_MODE(dfc_mode) &&
!(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
-return;
+return rc;
if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
/* retrieve qmi again as it could have been changed */
@@ -744,7 +751,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
kfree(qmi);
}
-return;
+return rc;
}
if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
@@ -754,7 +761,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
break;
case NLMSG_CLIENT_DELETE:
if (!qmi)
-return;
+return rc;
if (tcm->tcm_handle == 0) { /* instance 0 */
rmnet_clear_powersave_format(port);
if (qmi->wda_client)
@@ -768,7 +775,7 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
break;
case NLMSG_SCALE_FACTOR:
if (!tcm->tcm_ifindex)
-return;
+return rc;
qmi_rmnet_scale_factor = tcm->tcm_ifindex;
break;
case NLMSG_WQ_FREQUENCY:
@@ -777,12 +784,16 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
case NLMSG_CHANNEL_SWITCH:
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
!qmi_rmnet_has_dfc_client(qmi))
-return;
+return rc;
rc = rmnet_ll_switch(dev, tcm, attr_len);
break;
default:
pr_debug("%s(): No handler\n", __func__);
break;
}
return rc;
}
EXPORT_SYMBOL(qmi_rmnet_change_link);
@@ -880,32 +891,19 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
#ifdef CONFIG_QTI_QMI_DFC
-bool qmi_rmnet_flow_is_low_latency(struct net_device *dev, int ip_type,
-u32 mark)
+bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
+struct sk_buff *skb)
{
struct qos_info *qos = rmnet_get_qos_pt(dev);
-struct rmnet_bearer_map *bearer;
-struct rmnet_flow_map *itm;
-bool ret = false;
+int txq = skb->queue_mapping;
-if (!qos)
-goto out;
+if (txq > ACK_MQ_OFFSET)
+txq -= ACK_MQ_OFFSET;
-spin_lock_bh(&qos->qos_lock);
-itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
-if (!itm)
-goto out_unlock;
+if (unlikely(!qos || txq >= MAX_MQ_NUM))
+return false;
-bearer = itm->bearer;
-if (!bearer)
-goto out_unlock;
-ret = bearer->is_low_latency;
-out_unlock:
-spin_unlock_bh(&qos->qos_lock);
-out:
-return ret;
+return qos->mq[txq].is_ll_ch;
}
EXPORT_SYMBOL(qmi_rmnet_flow_is_low_latency);
@@ -1059,6 +1057,8 @@ void qmi_rmnet_qos_exit_pre(void *qos)
list_for_each_entry(bearer, &qosi->bearer_head, list) {
bearer->watchdog_quit = true;
del_timer_sync(&bearer->watchdog);
bearer->ch_switch.timer_quit = true;
del_timer_sync(&bearer->ch_switch.guard_timer);
}
list_add(&qosi->list, &qos_cleanup_list);


@@ -29,8 +29,8 @@ struct qmi_rmnet_ps_ind {
#ifdef CONFIG_QTI_QMI_RMNET
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
-void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
-int attr_len);
+int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+int attr_len);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
#else
@@ -38,10 +38,11 @@ static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
}
-static inline void
+static inline int
qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
int attr_len)
{
+return 0;
}
static inline void
@@ -61,8 +62,8 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev,
struct net_device *vnd_dev, u8 mux_id);
void qmi_rmnet_qos_exit_pre(void *qos);
void qmi_rmnet_qos_exit_post(void);
-bool qmi_rmnet_flow_is_low_latency(struct net_device *dev, int ip_type,
-u32 mark);
+bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
+struct sk_buff *skb);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -83,7 +84,7 @@ static inline void qmi_rmnet_qos_exit_post(void)
}
static inline bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
-int ip_type, u32 mark)
+struct sk_buff *skb)
{
return false;
}


@@ -17,6 +17,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <uapi/linux/rtnetlink.h>
#include <linux/soc/qcom/qmi.h>
#define MAX_MQ_NUM 16
#define MAX_CLIENT_NUM 2
@@ -39,6 +41,34 @@ extern int dfc_qmap;
struct qos_info;
enum {
RMNET_CH_DEFAULT,
RMNET_CH_LL,
RMNET_CH_MAX,
RMNET_CH_CTL = 0xFF
};
enum rmnet_ch_switch_state {
CH_SWITCH_NONE,
CH_SWITCH_STARTED,
CH_SWITCH_ACKED,
CH_SWITCH_FAILED_RETRY
};
struct rmnet_ch_switch {
u8 current_ch;
u8 switch_to_ch;
u8 retry_left;
u8 status_code;
enum rmnet_ch_switch_state state;
__be32 switch_txid;
u32 flags;
bool timer_quit;
struct timer_list guard_timer;
u32 nl_pid;
u32 nl_seq;
};
struct rmnet_bearer_map {
struct list_head list;
u8 bearer_id;
@@ -51,7 +81,6 @@ struct rmnet_bearer_map {
u16 last_seq;
u32 bytes_in_flight;
u32 last_adjusted_grant;
-bool is_low_latency;
bool tcp_bidir;
bool rat_switch;
bool tx_off;
@@ -63,6 +92,7 @@ struct rmnet_bearer_map {
bool watchdog_started;
bool watchdog_quit;
u32 watchdog_expire_cnt;
struct rmnet_ch_switch ch_switch;
};
struct rmnet_flow_map {
@@ -82,6 +112,7 @@ struct svc_info {
struct mq_map {
struct rmnet_bearer_map *bearer;
bool is_ll_ch;
};
struct qos_info {
@@ -170,6 +201,9 @@ void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer);
void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer);
int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen);
void rmnet_ll_guard_fn(struct timer_list *t);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -217,6 +251,12 @@ static inline void dfc_qmap_client_exit(void *dfc_data)
static inline void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
{
}
static inline int rmnet_ll_switch(struct net_device *dev,
struct tcmsg *tcm, int attrlen)
{
return -EINVAL;
}
#endif
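The ch_switch fields and states above implement a small per-bearer state machine; the following summary is distilled from the rmnet_ll_qmap.c logic added later in this commit:

/*
 * CH_SWITCH_NONE
 *   --rmnet_ll_switch() sends QMAP_LL_SWITCH---------> CH_SWITCH_STARTED
 * CH_SWITCH_STARTED
 *   --ACK with SWITCH_STATUS_SUCCESS-----------------> CH_SWITCH_ACKED
 *   --ACK with SWITCH_STATUS_FAIL_TEMP, retries left--> CH_SWITCH_FAILED_RETRY
 *   --other ACK status, or guard timer (LL_TIMEOUT)---> CH_SWITCH_NONE + nl ack
 * CH_SWITCH_ACKED
 *   --QMAP_LL_SWITCH_STATUS indication----------------> CH_SWITCH_NONE + nl ack
 * CH_SWITCH_FAILED_RETRY
 *   --guard timer (LL_RETRY_TIME) resends the request-> CH_SWITCH_STARTED
 */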
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE


@@ -388,6 +388,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
int rc = 0;
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
@@ -422,7 +423,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct tcmsg *tcm;
tcm = nla_data(qos);
-qmi_rmnet_change_link(dev, port, tcm, nla_len(qos));
+rc = qmi_rmnet_change_link(dev, port, tcm, nla_len(qos));
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
@@ -435,7 +436,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
agg_params->agg_time);
}
-return 0;
+return rc;
}
static size_t rmnet_get_size(const struct net_device *dev)

core/rmnet_ll_qmap.c (new file, 471 lines)

@@ -0,0 +1,471 @@
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netlink.h>
#include <uapi/linux/rtnetlink.h>
#include <linux/net.h>
#include <net/sock.h>
#include "dfc.h"
#include "rmnet_qmi.h"
#include "rmnet_qmap.h"
#include "qmi_rmnet_i.h"
#define QMAP_LL_VER 1
#define QMAP_LL_MAX_BEARER 15
#define QMAP_SWITCH_TO_LL 1
#define QMAP_SWITCH_TO_DEFAULT 2
#define QMAP_SWITCH_QUERY 3
/* Switch status from modem */
#define SWITCH_STATUS_ERROR 0
#define SWITCH_STATUS_SUCCESS 1
#define SWITCH_STATUS_DEFAULT 2
#define SWITCH_STATUS_LL 3
#define SWITCH_STATUS_FAIL_TEMP 4
#define SWITCH_STATUS_FAIL_PERM 5
/* Internal switch status */
#define SWITCH_STATUS_NONE 0xFF
#define SWITCH_STATUS_TIMEOUT 0xFE
#define SWITCH_STATUS_NO_EFFECT 0xFD
#define LL_MASK_NL_ACK 1
#define LL_MASK_AUTO_RETRY 2
#define LL_TIMEOUT (5 * HZ)
#define LL_RETRY_TIME (10 * HZ)
#define LL_MAX_RETRY (3)
struct qmap_ll_bearer {
u8 bearer_id;
u8 status;
u8 reserved[2];
} __aligned(1);
struct qmap_ll_switch {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved;
u8 request_type;
u8 num_bearers;
struct qmap_ll_bearer bearer[0];
} __aligned(1);
struct qmap_ll_switch_resp {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved[2];
u8 num_bearers;
struct qmap_ll_bearer bearer[0];
} __aligned(1);
struct qmap_ll_switch_status {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved[2];
u8 num_bearers;
struct qmap_ll_bearer bearer[0];
} __aligned(1);
static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct nlmsgerr *errmsg;
unsigned int flags = NLM_F_CAPPED;
if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
return;
skb = nlmsg_new(sizeof(*errmsg), GFP_ATOMIC);
if (!skb)
return;
nlh = __nlmsg_put(skb, bearer->ch_switch.nl_pid,
bearer->ch_switch.nl_seq, NLMSG_ERROR,
sizeof(*errmsg), flags);
errmsg = nlmsg_data(nlh);
errmsg->error = 0;
errmsg->msg.nlmsg_type = bearer->bearer_id;
errmsg->msg.nlmsg_flags = bearer->ch_switch.status_code;
errmsg->msg.nlmsg_seq = bearer->ch_switch.current_ch;
nlmsg_end(skb, nlh);
rtnl_unicast(skb, &init_net, bearer->ch_switch.nl_pid);
}
static void ll_qmap_maybe_set_ch(struct qos_info *qos,
struct rmnet_bearer_map *bearer, u8 status)
{
u8 ch;
if (status == SWITCH_STATUS_DEFAULT)
ch = RMNET_CH_DEFAULT;
else if (status == SWITCH_STATUS_LL)
ch = RMNET_CH_LL;
else
return;
bearer->ch_switch.current_ch = ch;
if (bearer->mq_idx < MAX_MQ_NUM)
qos->mq[bearer->mq_idx].is_ll_ch = ch;
}
static void ll_switch_complete(struct rmnet_bearer_map *bearer, u8 status)
{
bearer->ch_switch.status_code = status;
if (status == SWITCH_STATUS_FAIL_TEMP &&
bearer->ch_switch.retry_left) {
/* Temp failure retry */
bearer->ch_switch.state = CH_SWITCH_FAILED_RETRY;
mod_timer(&bearer->ch_switch.guard_timer,
jiffies + LL_RETRY_TIME);
bearer->ch_switch.retry_left--;
} else {
/* Success or permanent failure */
bearer->ch_switch.timer_quit = true;
del_timer(&bearer->ch_switch.guard_timer);
bearer->ch_switch.state = CH_SWITCH_NONE;
bearer->ch_switch.retry_left = 0;
ll_send_nl_ack(bearer);
bearer->ch_switch.flags = 0;
}
}
static int ll_qmap_handle_switch_resp(struct sk_buff *skb)
{
struct qmap_ll_switch_resp *cmd;
struct rmnet_bearer_map *bearer;
struct qos_info *qos;
struct net_device *dev;
int i;
if (skb->len < sizeof(struct qmap_ll_switch_resp))
return QMAP_CMD_DONE;
cmd = (struct qmap_ll_switch_resp *)skb->data;
if (!cmd->num_bearers)
return QMAP_CMD_DONE;
if (skb->len < sizeof(*cmd) +
cmd->num_bearers * sizeof(struct qmap_ll_bearer))
return QMAP_CMD_DONE;
dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
if (!dev)
return QMAP_CMD_DONE;
qos = rmnet_get_qos_pt(dev);
if (!qos)
return QMAP_CMD_DONE;
trace_dfc_ll_switch("ACK", 0, cmd->num_bearers, cmd->bearer);
spin_lock_bh(&qos->qos_lock);
for (i = 0; i < cmd->num_bearers; i++) {
bearer = qmi_rmnet_get_bearer_map(qos,
cmd->bearer[i].bearer_id);
if (!bearer)
continue;
ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);
if (bearer->ch_switch.state == CH_SWITCH_STARTED &&
bearer->ch_switch.switch_txid == cmd->hdr.tx_id) {
/* This is an ACK to the switch request */
if (cmd->bearer[i].status == SWITCH_STATUS_SUCCESS)
bearer->ch_switch.state = CH_SWITCH_ACKED;
else
ll_switch_complete(bearer,
cmd->bearer[i].status);
}
}
spin_unlock_bh(&qos->qos_lock);
return QMAP_CMD_DONE;
}
static int ll_qmap_handle_switch_status(struct sk_buff *skb)
{
struct qmap_ll_switch_status *cmd;
struct rmnet_bearer_map *bearer;
struct qos_info *qos;
struct net_device *dev;
int i;
if (skb->len < sizeof(struct qmap_ll_switch_status))
return QMAP_CMD_INVALID;
cmd = (struct qmap_ll_switch_status *)skb->data;
if (!cmd->num_bearers)
return QMAP_CMD_ACK;
if (skb->len < sizeof(*cmd) +
cmd->num_bearers * sizeof(struct qmap_ll_bearer))
return QMAP_CMD_INVALID;
dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
if (!dev)
return QMAP_CMD_ACK;
qos = rmnet_get_qos_pt(dev);
if (!qos)
return QMAP_CMD_ACK;
trace_dfc_ll_switch("STS", 0, cmd->num_bearers, cmd->bearer);
spin_lock_bh(&qos->qos_lock);
for (i = 0; i < cmd->num_bearers; i++) {
bearer = qmi_rmnet_get_bearer_map(qos,
cmd->bearer[i].bearer_id);
if (!bearer)
continue;
ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);
if (bearer->ch_switch.state == CH_SWITCH_ACKED)
ll_switch_complete(bearer, cmd->bearer[i].status);
}
spin_unlock_bh(&qos->qos_lock);
return QMAP_CMD_ACK;
}
int ll_qmap_cmd_handler(struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
int rc = QMAP_CMD_DONE;
cmd = (struct qmap_cmd_hdr *)skb->data;
if (cmd->cmd_name == QMAP_LL_SWITCH) {
if (cmd->cmd_type != QMAP_CMD_ACK)
return rc;
} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
return rc;
}
switch (cmd->cmd_name) {
case QMAP_LL_SWITCH:
rc = ll_qmap_handle_switch_resp(skb);
break;
case QMAP_LL_SWITCH_STATUS:
rc = ll_qmap_handle_switch_status(skb);
break;
default:
if (cmd->cmd_type == QMAP_CMD_REQUEST)
rc = QMAP_CMD_UNSUPPORTED;
}
return rc;
}
static int ll_qmap_send_switch(u8 mux_id, u8 channel, u8 num_bearers,
u8 *bearer_list, __be32 *txid)
{
struct sk_buff *skb;
struct qmap_ll_switch *ll_switch;
unsigned int len;
int i;
if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER || !bearer_list)
return -EINVAL;
len = sizeof(struct qmap_ll_switch) +
num_bearers * sizeof(struct qmap_ll_bearer);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb->protocol = htons(ETH_P_MAP);
ll_switch = skb_put(skb, len);
memset(ll_switch, 0, len);
ll_switch->hdr.cd_bit = 1;
ll_switch->hdr.mux_id = mux_id;
ll_switch->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
ll_switch->hdr.cmd_name = QMAP_LL_SWITCH;
ll_switch->hdr.cmd_type = QMAP_CMD_REQUEST;
ll_switch->hdr.tx_id = htonl(rmnet_qmap_next_txid());
ll_switch->cmd_ver = QMAP_LL_VER;
if (channel == RMNET_CH_CTL)
ll_switch->request_type = QMAP_SWITCH_QUERY;
else if (channel == RMNET_CH_LL)
ll_switch->request_type = QMAP_SWITCH_TO_LL;
else
ll_switch->request_type = QMAP_SWITCH_TO_DEFAULT;
ll_switch->num_bearers = num_bearers;
for (i = 0; i < num_bearers; i++)
ll_switch->bearer[i].bearer_id = bearer_list[i];
if (txid)
*txid = ll_switch->hdr.tx_id;
trace_dfc_ll_switch("REQ", ll_switch->request_type,
ll_switch->num_bearers, ll_switch->bearer);
return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
/*
* Start channel switch. The switch request is sent only if all bearers
* are eligible to switch. Return 0 if switch request is sent.
*/
int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen)
{
u8 switch_to_ch;
u8 num_bearers;
u8 *bearer_list;
u32 flags;
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
__be32 txid;
int i;
int j;
int rc = -EINVAL;
if (!dev || !tcm)
return -EINVAL;
/*
* tcm__pad1: switch type (ch #, 0xFF query)
* tcm__pad2: num bearers
* tcm_info: flags
* tcm_ifindex: netlink fd
* tcm_handle: pid
* tcm_parent: seq
*/
switch_to_ch = tcm->tcm__pad1;
num_bearers = tcm->tcm__pad2;
flags = tcm->tcm_info;
if (switch_to_ch != RMNET_CH_CTL && switch_to_ch >= RMNET_CH_MAX)
return -EOPNOTSUPP;
if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER)
return -EINVAL;
if (attrlen - sizeof(*tcm) < num_bearers)
return -EINVAL;
bearer_list = (u8 *)tcm + sizeof(*tcm);
for (i = 0; i < num_bearers; i++)
for (j = 0; j < num_bearers; j++)
if (j != i && bearer_list[i] == bearer_list[j])
return -EINVAL;
qos = rmnet_get_qos_pt(dev);
if (!qos)
return -EINVAL;
spin_lock_bh(&qos->qos_lock);
/* Validate the bearer list */
for (i = 0; i < num_bearers; i++) {
bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
if (!bearer) {
rc = -EFAULT;
goto out;
}
if (bearer->ch_switch.state != CH_SWITCH_NONE) {
rc = -EBUSY;
goto out;
}
}
/* Send QMAP switch command */
rc = ll_qmap_send_switch(qos->mux_id, switch_to_ch,
num_bearers, bearer_list, &txid);
if (rc)
goto out;
/* Update state */
for (i = 0; i < num_bearers; i++) {
bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
if (!bearer)
continue;
bearer->ch_switch.switch_to_ch = switch_to_ch;
bearer->ch_switch.switch_txid = txid;
bearer->ch_switch.state = CH_SWITCH_STARTED;
bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
bearer->ch_switch.retry_left =
(flags & LL_MASK_AUTO_RETRY) ? LL_MAX_RETRY : 0;
bearer->ch_switch.flags = flags;
bearer->ch_switch.timer_quit = false;
mod_timer(&bearer->ch_switch.guard_timer,
jiffies + LL_TIMEOUT);
bearer->ch_switch.nl_pid = tcm->tcm_handle;
bearer->ch_switch.nl_seq = tcm->tcm_parent;
}
out:
spin_unlock_bh(&qos->qos_lock);
return rc;
}
void rmnet_ll_guard_fn(struct timer_list *t)
{
struct rmnet_ch_switch *ch_switch;
struct rmnet_bearer_map *bearer;
int switch_status = SWITCH_STATUS_TIMEOUT;
__be32 txid;
int rc;
ch_switch = container_of(t, struct rmnet_ch_switch, guard_timer);
bearer = container_of(ch_switch, struct rmnet_bearer_map, ch_switch);
spin_lock_bh(&bearer->qos->qos_lock);
if (bearer->ch_switch.timer_quit ||
bearer->ch_switch.state == CH_SWITCH_NONE)
goto out;
if (bearer->ch_switch.state == CH_SWITCH_FAILED_RETRY) {
if (bearer->ch_switch.current_ch ==
bearer->ch_switch.switch_to_ch) {
switch_status = SWITCH_STATUS_NO_EFFECT;
goto send_err;
}
rc = ll_qmap_send_switch(bearer->qos->mux_id,
bearer->ch_switch.switch_to_ch,
1,
&bearer->bearer_id,
&txid);
if (!rc) {
bearer->ch_switch.switch_txid = txid;
bearer->ch_switch.state = CH_SWITCH_STARTED;
bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
goto out;
}
}
send_err:
bearer->ch_switch.state = CH_SWITCH_NONE;
bearer->ch_switch.status_code = switch_status;
bearer->ch_switch.retry_left = 0;
ll_send_nl_ack(bearer);
bearer->ch_switch.flags = 0;
out:
spin_unlock_bh(&bearer->qos->qos_lock);
}
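For reference, the tcm field mapping documented in rmnet_ll_switch() suggests a userspace trigger shaped roughly like the sketch below. This is a hedged example: the RTM_NEWLINK/IFLA plumbing that delivers the tcmsg to rmnet_changelink(), and the numeric value of NLMSG_CHANNEL_SWITCH, are downstream-specific and treated as assumptions here.

#include <linux/rtnetlink.h>
#include <string.h>

/* Sketch only: builds the tcmsg payload that rmnet_ll_switch() parses.
 * The caller is assumed to wrap this in an RTM_NEWLINK message inside
 * the rmnet link attribute that rmnet_changelink() unpacks.
 */
static size_t build_ll_switch_tcm(unsigned char *buf, unsigned char family,
				  int nl_fd, unsigned int nl_pid,
				  unsigned int nl_seq,
				  const unsigned char *bearers,
				  unsigned char num_bearers)
{
	struct tcmsg tcm;

	memset(&tcm, 0, sizeof(tcm));
	tcm.tcm_family = family;  /* NLMSG_CHANNEL_SWITCH (downstream value) */
	tcm.tcm__pad1 = 1;        /* RMNET_CH_LL; 0 = default, 0xFF = query */
	tcm.tcm__pad2 = num_bearers;
	tcm.tcm_info = 1 | 2;     /* LL_MASK_NL_ACK | LL_MASK_AUTO_RETRY */
	tcm.tcm_ifindex = nl_fd;  /* netlink fd */
	tcm.tcm_handle = nl_pid;  /* pid echoed back in the unicast ack */
	tcm.tcm_parent = nl_seq;  /* seq echoed back in the unicast ack */

	memcpy(buf, &tcm, sizeof(tcm));
	memcpy(buf + sizeof(tcm), bearers, num_bearers); /* bearer id list */
	return sizeof(tcm) + num_bearers;
}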


@@ -292,6 +292,7 @@ int rmnet_map_dl_ind_register(struct rmnet_port *port,
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);
void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state,
unsigned long flags);
int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,


@@ -1627,38 +1627,41 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
spin_unlock_irqrestore(&port->agg_lock, flags);
}
-void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
{
+struct rmnet_aggregation_state *state;
struct rmnet_port *port;
struct sk_buff *agg_skb;
unsigned long flags;
-unsigned int i;
+if (unlikely(ch >= RMNET_MAX_AGG_STATE))
+ch = RMNET_DEFAULT_AGG_STATE;
port = rmnet_get_port(qmap_skb->dev);
+state = &port->agg_state[ch];
-if (port && (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
+if (!flush)
+goto send;
-for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
-struct rmnet_aggregation_state *state = &port->agg_state[i];
+if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
+goto send;
-spin_lock_irqsave(&port->agg_lock, flags);
-if (state->agg_skb) {
-agg_skb = state->agg_skb;
-state->agg_skb = NULL;
-state->agg_count = 0;
-memset(&state->agg_time, 0, sizeof(state->agg_time));
-state->agg_state = 0;
-spin_unlock_irqrestore(&port->agg_lock, flags);
-hrtimer_cancel(&state->hrtimer);
-state->send_agg_skb(agg_skb);
-} else {
-spin_unlock_irqrestore(&port->agg_lock, flags);
-}
+spin_lock_irqsave(&port->agg_lock, flags);
+if (state->agg_skb) {
+agg_skb = state->agg_skb;
+state->agg_skb = NULL;
+state->agg_count = 0;
+memset(&state->agg_time, 0, sizeof(state->agg_time));
+state->agg_state = 0;
+spin_unlock_irqrestore(&port->agg_lock, flags);
+hrtimer_cancel(&state->hrtimer);
+state->send_agg_skb(agg_skb);
+} else {
+spin_unlock_irqrestore(&port->agg_lock, flags);
+}
send:
-dev_queue_xmit(qmap_skb);
+state->send_agg_skb(qmap_skb);
}
}
EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);

core/rmnet_qmap.c (new file, 152 lines)

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "dfc.h"
#include "rmnet_qmi.h"
#include "rmnet_ctl.h"
#include "rmnet_qmap.h"
static atomic_t qmap_txid;
static void *rmnet_ctl_handle;
static void *rmnet_port;
static struct net_device *real_data_dev;
static struct rmnet_ctl_client_if *rmnet_ctl;
int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush)
{
trace_dfc_qmap(skb->data, skb->len, false);
if (ch != RMNET_CH_CTL && real_data_dev) {
skb->protocol = htons(ETH_P_MAP);
skb->dev = real_data_dev;
rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0, skb->data,
skb->len);
rmnet_map_tx_qmap_cmd(skb, ch, flush);
return 0;
}
if (rmnet_ctl->send(rmnet_ctl_handle, skb)) {
pr_err("Failed to send to rmnet ctl\n");
kfree_skb(skb);
return -ECOMM;
}
return 0;
}
static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
int rc = QMAP_CMD_DONE;
if (!skb)
return;
trace_dfc_qmap(skb->data, skb->len, true);
if (skb->len < sizeof(struct qmap_cmd_hdr))
goto free_skb;
cmd = (struct qmap_cmd_hdr *)skb->data;
if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
goto free_skb;
rcu_read_lock();
switch (cmd->cmd_name) {
case QMAP_DFC_CONFIG:
case QMAP_DFC_IND:
case QMAP_DFC_QUERY:
case QMAP_DFC_END_MARKER:
rc = dfc_qmap_cmd_handler(skb);
break;
case QMAP_LL_SWITCH:
case QMAP_LL_SWITCH_STATUS:
rc = ll_qmap_cmd_handler(skb);
break;
default:
if (cmd->cmd_type == QMAP_CMD_REQUEST)
rc = QMAP_CMD_UNSUPPORTED;
}
/* Send ack */
if (rc != QMAP_CMD_DONE) {
if (rc == QMAP_CMD_ACK_INBAND) {
cmd->cmd_type = QMAP_CMD_ACK;
rmnet_qmap_send(skb, RMNET_CH_DEFAULT, false);
} else {
cmd->cmd_type = rc;
rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}
rcu_read_unlock();
return;
}
rcu_read_unlock();
free_skb:
kfree_skb(skb);
}
static struct rmnet_ctl_client_hooks cb = {
.ctl_dl_client_hook = rmnet_qmap_cmd_handler,
};
int rmnet_qmap_next_txid(void)
{
return atomic_inc_return(&qmap_txid);
}
struct net_device *rmnet_qmap_get_dev(u8 mux_id)
{
return rmnet_get_rmnet_dev(rmnet_port, mux_id);
}
int rmnet_qmap_init(void *port)
{
if (rmnet_ctl_handle)
return 0;
atomic_set(&qmap_txid, 0);
rmnet_port = port;
real_data_dev = rmnet_get_real_dev(rmnet_port);
rmnet_ctl = rmnet_ctl_if();
if (!rmnet_ctl) {
pr_err("rmnet_ctl module not loaded\n");
return -EFAULT;
}
if (rmnet_ctl->reg)
rmnet_ctl_handle = rmnet_ctl->reg(&cb);
if (!rmnet_ctl_handle) {
pr_err("Failed to register with rmnet ctl\n");
return -EFAULT;
}
return 0;
}
void rmnet_qmap_exit(void)
{
if (rmnet_ctl && rmnet_ctl->dereg)
rmnet_ctl->dereg(rmnet_ctl_handle);
rmnet_ctl_handle = NULL;
real_data_dev = NULL;
rmnet_port = NULL;
}
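The rc convention above makes extending the dispatcher mechanical; a hypothetical extra command (names invented for illustration, not part of this commit) would plug into the switch statement as:

	/* Return QMAP_CMD_DONE to consume the command silently,
	 * QMAP_CMD_ACK/_INVALID/_UNSUPPORTED to echo the skb back as an
	 * ack on the control channel, or QMAP_CMD_ACK_INBAND to ack
	 * in-band on the default data channel.
	 */
	case QMAP_FOO:                           /* hypothetical command id */
		rc = qmap_foo_cmd_handler(skb);  /* hypothetical handler */
		break;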

core/rmnet_qmap.h (new file, 64 lines)

@@ -0,0 +1,64 @@
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __RMNET_QMAP_H
#define __RMNET_QMAP_H
#include "qmi_rmnet_i.h"
#define QMAP_CMD_DONE -1
#define QMAP_CMD_ACK_INBAND -2
#define QMAP_CMD_REQUEST 0
#define QMAP_CMD_ACK 1
#define QMAP_CMD_UNSUPPORTED 2
#define QMAP_CMD_INVALID 3
struct qmap_hdr {
u8 cd_pad;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
struct qmap_cmd_hdr {
u8 pad_len:6;
u8 reserved_bit:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
u8 cmd_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
__be32 tx_id;
} __aligned(1);
int rmnet_qmap_init(void *port);
void rmnet_qmap_exit(void);
int rmnet_qmap_next_txid(void);
int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush);
struct net_device *rmnet_qmap_get_dev(u8 mux_id);
#define QMAP_DFC_CONFIG 10
#define QMAP_DFC_IND 11
#define QMAP_DFC_QUERY 12
#define QMAP_DFC_END_MARKER 13
int dfc_qmap_cmd_handler(struct sk_buff *skb);
#define QMAP_LL_SWITCH 25
#define QMAP_LL_SWITCH_STATUS 26
int ll_qmap_cmd_handler(struct sk_buff *skb);
#endif /* __RMNET_QMAP_H */
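Putting the command header and the LL structs from rmnet_ll_qmap.c together, a one-bearer QMAP_LL_SWITCH request occupies 20 bytes on the wire. A byte-layout sketch for a little-endian build (mux_id, bearer_id, and tx_id values made up):

/*
 * off  value(s)             field
 *  0   0x80                 pad_len=0, reserved_bit=0, cd_bit=1
 *  1   0x03                 mux_id
 *  2   0x00 0x10            pkt_len = htons(20 - QMAP_HDR_LEN) = htons(16)
 *  4   0x19                 cmd_name = QMAP_LL_SWITCH (25)
 *  5   0x00                 cmd_type = QMAP_CMD_REQUEST, reserved = 0
 *  6   0x00 0x00            reserved2
 *  8   0x00 0x00 0x00 0x07  tx_id = htonl(rmnet_qmap_next_txid())
 * 12   0x01                 cmd_ver = QMAP_LL_VER
 * 13   0x00                 reserved
 * 14   0x01                 request_type = QMAP_SWITCH_TO_LL
 * 15   0x01                 num_bearers = 1
 * 16   0x05                 bearer[0].bearer_id
 * 17   0x00                 bearer[0].status
 * 18   0x00 0x00            bearer[0].reserved
 */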


@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,7 @@
#include <linux/skbuff.h>
#define CONFIG_QTI_QMI_RMNET 1
-void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
#ifdef CONFIG_QTI_QMI_RMNET
void *rmnet_get_qmi_pt(void *port);


@@ -88,7 +88,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
if (rmnet_perf_tether_egress) {
rmnet_perf_tether_egress(skb);
}
-low_latency = qmi_rmnet_flow_is_low_latency(dev, ip_type, mark);
+low_latency = qmi_rmnet_flow_is_low_latency(dev, skb);
rmnet_egress_handler(skb, low_latency);
qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));