core: rmnet: Fastforward to 4.19 tip

This brings the RmNet and DFC modules up to date with the 4.19 tip as of
commit 9b38611ea527 ("rmnet: Reduce synchronize_rcu calls").

As part of this, the rmnet_ctl driver was also incorporated, using commit
4ceee3aafb7d ("rmnet_ctl: Add IPC logging and optimizations").

Change-Id: Ic45d46074c7401dfed408c769cfb6462dac0d4ee
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
Author: Sean Tranchetti
Date: 2020-01-13 12:21:14 -07:00
Parent: 08d4972b2a
Commit: eeb4944964
26 changed files with 2433 additions and 600 deletions


@@ -12,7 +12,18 @@ LOCAL_CLANG :=true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_core.ko
LOCAL_SRC_FILES := rmnet_config.c rmnet_descriptor.c rmnet_handlers.c rmnet_map_command.c rmnet_map_data.c rmnet_vnd.c dfc_qmi.c qmi_rmnet.c wda_qmi.c
LOCAL_SRC_FILES := \
rmnet_config.c \
rmnet_descriptor.c \
rmnet_genl.c \
rmnet_handlers.c \
rmnet_map_command.c \
rmnet_map_data.c \
rmnet_vnd.c \
dfc_qmap.c \
dfc_qmi.c \
qmi_rmnet.c \
wda_qmi.c
RMNET_BLD_DIR := ../../vendor/qcom/opensource/datarmnet/core
DLKM_DIR := $(TOP)/device/qcom/common/dlkm


@@ -1,5 +1,6 @@
obj-m += rmnet_core.o
rmnet_core-y := rmnet_config.o rmnet_handlers.o rmnet_descriptor.o rmnet_map_command.o rmnet_map_data.o rmnet_vnd.o rmnet_descriptor.o\
qmi_rmnet.o wda_qmi.o dfc_qmi.o
rmnet_core-y := rmnet_config.o rmnet_handlers.o rmnet_descriptor.o \
rmnet_genl.o rmnet_map_command.o rmnet_map_data.o rmnet_vnd.o\
qmi_rmnet.o wda_qmi.o dfc_qmi.o dfc_qmap.o


@@ -1,4 +1,4 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,35 +23,25 @@
TRACE_EVENT(dfc_qmi_tc,
TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
int qlen, u32 tcm_handle, int enable),
TP_PROTO(const char *name, u32 txq, int enable),
TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),
TP_ARGS(name, txq, enable),
TP_STRUCT__entry(
__string(dev_name, name)
__field(u8, bid)
__field(u32, fid)
__field(u32, grant)
__field(int, qlen)
__field(u32, tcm_handle)
__field(u32, txq)
__field(int, enable)
),
TP_fast_assign(
__assign_str(dev_name, name);
__entry->bid = bearer_id;
__entry->fid = flow_id;
__entry->grant = grant;
__entry->qlen = qlen;
__entry->tcm_handle = tcm_handle;
__entry->txq = txq;
__entry->enable = enable;
),
TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
TP_printk("dev=%s txq=%u %s",
__get_str(dev_name),
__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
__entry->tcm_handle,
__entry->txq,
__entry->enable ? "enable" : "disable")
);
@@ -93,14 +83,16 @@ TRACE_EVENT(dfc_flow_ind,
TRACE_EVENT(dfc_flow_check,
TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),
TP_PROTO(const char *name, u8 bearer_id, unsigned int len,
u32 mark, u32 grant),
TP_ARGS(name, bearer_id, len, grant),
TP_ARGS(name, bearer_id, len, mark, grant),
TP_STRUCT__entry(
__string(dev_name, name)
__field(u8, bearer_id)
__field(unsigned int, len)
__field(u32, mark)
__field(u32, grant)
),
@@ -108,12 +100,13 @@ TRACE_EVENT(dfc_flow_check,
__assign_str(dev_name, name);
__entry->bearer_id = bearer_id;
__entry->len = len;
__entry->mark = mark;
__entry->grant = grant;
),
TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
__get_str(dev_name),
__entry->bearer_id, __entry->len, __entry->grant)
TP_printk("dev=%s bearer_id=%u skb_len=%u mark=%u current_grant=%u",
__get_str(dev_name), __entry->bearer_id,
__entry->len, __entry->mark, __entry->grant)
);
TRACE_EVENT(dfc_flow_info,
@@ -141,7 +134,7 @@ TRACE_EVENT(dfc_flow_info,
__entry->action = add;
),
TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d txq=%d",
__entry->action ? "add flow" : "delete flow",
__get_str(dev_name),
__entry->bid, __entry->fid, __entry->ip, __entry->handle)
@@ -167,8 +160,7 @@ TRACE_EVENT(dfc_client_state_up,
__entry->iface = iface;
),
TP_printk("Client[%d]: Connection established with DFC Service "
"instance=%u ep_type=%u iface_id=%u",
TP_printk("DFC Client[%d] connect: instance=%u ep_type=%u iface_id=%u",
__entry->idx, __entry->instance,
__entry->ep_type, __entry->iface)
);
@@ -189,8 +181,7 @@ TRACE_EVENT(dfc_client_state_down,
__entry->from_cb = from_cb;
),
TP_printk("Client[%d]: Connection with DFC service lost. "
"Exit by callback %d",
TP_printk("DFC Client[%d] exit: callback %d",
__entry->idx, __entry->from_cb)
);
@@ -248,6 +239,29 @@ TRACE_EVENT(dfc_tx_link_status_ind,
__entry->mid, __entry->bid)
);
TRACE_EVENT(dfc_qmap,
TP_PROTO(const void *data, size_t len, bool in),
TP_ARGS(data, len, in),
TP_STRUCT__entry(
__field(bool, in)
__field(size_t, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__entry->in = in;
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk("%s [%s]",
__entry->in ? "<--" : "-->",
__print_hex(__get_dynamic_array(data), __entry->len))
);
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */
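
For orientation, the two reworked trace points above are exercised verbatim later in this patch; a minimal sketch of the call sites:

/* flow-control toggle, now keyed by TX queue instead of per-flow data */
trace_dfc_qmi_tc(dev->name, mq_idx, enable);

/* raw QMAP control frame dump; in = false on the transmit path */
trace_dfc_qmap(skb->data, skb->len, false);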


@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include <net/pkt_sched.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include <trace/events/dfc.h>
#include <soc/qcom/rmnet_ctl.h>
#include "rmnet_ctl.h"
#include "dfc_defs.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc.h"
#define QMAP_DFC_VER 1
@@ -148,6 +148,7 @@ static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
skb->protocol = htons(ETH_P_MAP);
skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
rmnet_ctl_log_debug("TXI", skb->data, skb->len);
trace_dfc_qmap(skb->data, skb->len, false);
dev_queue_xmit(skb);
}
@@ -433,6 +434,7 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
skb->dev = qos->real_dev;
/* This cmd needs to be sent in-band */
rmnet_ctl_log_info("TXI", skb->data, skb->len);
trace_dfc_qmap(skb->data, skb->len, false);
rmnet_map_tx_qmap_cmd(skb);
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,27 +11,14 @@
* GNU General Public License for more details.
*/
#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <linux/soc/qcom/qmi.h>
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#include "dfc_defs.h"
#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include "dfc.h"
#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
#define DFC_MAX_QOS_ID_V01 2
#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2
struct dfc_qmap_header {
u8 pad_len:6;
u8 reserved_bit:1;
@@ -56,20 +43,6 @@ struct dfc_ack_cmd {
u8 bearer_id;
} __aligned(1);
struct dfc_qmi_data {
void *rmnet_port;
struct workqueue_struct *dfc_wq;
struct work_struct svc_arrive;
struct qmi_handle handle;
struct sockaddr_qrtr ssctl;
struct svc_info svc;
struct work_struct qmi_ind_work;
struct list_head qmi_ind_q;
spinlock_t qmi_ind_lock;
int index;
int restart_state;
};
static void dfc_svc_init(struct work_struct *work);
/* **************************************************** */
@@ -115,28 +88,6 @@ struct dfc_indication_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
enum dfc_ip_type_enum_v01 {
DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
DFC_IPV4_TYPE_V01 = 0x4,
DFC_IPV6_TYPE_V01 = 0x6,
DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};
struct dfc_qos_id_type_v01 {
u32 qos_id;
enum dfc_ip_type_enum_v01 ip_type;
};
struct dfc_flow_status_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
u32 num_bytes;
u16 seq_num;
u8 qos_ids_len;
struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -250,13 +201,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
},
};
struct dfc_ancillary_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
u32 reserved;
};
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
@@ -309,31 +253,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
},
};
struct dfc_flow_status_ind_msg_v01 {
u8 flow_status_valid;
u8 flow_status_len;
struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
u8 eod_ack_reqd_valid;
u8 eod_ack_reqd;
u8 ancillary_info_valid;
u8 ancillary_info_len;
struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};
struct dfc_bearer_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
enum dfc_ip_type_enum_v01 ip_type;
};
struct dfc_tx_link_status_ind_msg_v01 {
u8 tx_status;
u8 bearer_info_valid;
u8 bearer_info_len;
struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};
struct dfc_get_flow_status_req_msg_v01 {
u8 bearer_id_list_valid;
u8 bearer_id_list_len;
@@ -963,6 +882,11 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
if (!qos)
return;
if (dfc_qmap) {
dfc_qmap_send_ack(qos, bearer_id, seq, type);
return;
}
skb = alloc_skb(data_size, GFP_ATOMIC);
if (!skb)
return;
@@ -995,75 +919,45 @@ int dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
struct qos_info *qos)
{
struct rmnet_flow_map *itm;
int rc = 0, qlen;
int enable;
bool enable;
enable = bearer->grant_size ? 1 : 0;
enable = bearer->grant_size ? true : false;
list_for_each_entry(itm, &qos->flow_head, list) {
if (itm->bearer_id == bearer->bearer_id) {
/*
* Do not flow disable ancillary q if ancillary is true
*/
if (bearer->tcp_bidir && enable == 0 &&
DFC_IS_ANCILLARY(itm->ip_type))
continue;
qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
enable);
trace_dfc_qmi_tc(dev->name, itm->bearer_id,
itm->flow_id, bearer->grant_size,
qlen, itm->tcm_handle, enable);
rc++;
}
}
/* Do not flow disable tcp ack q in tcp bidir */
if (bearer->ack_mq_idx != INVALID_MQ &&
(enable || !bearer->tcp_bidir))
qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, enable);
if (enable == 0 && bearer->ack_req)
if (!enable && bearer->ack_req)
dfc_send_ack(dev, bearer->bearer_id,
bearer->seq, qos->mux_id,
DFC_ACK_TYPE_DISABLE);
return rc;
return 0;
}
static int dfc_all_bearer_flow_ctl(struct net_device *dev,
struct qos_info *qos, u8 ack_req, u32 ancillary,
struct dfc_flow_status_info_type_v01 *fc_info)
{
struct rmnet_bearer_map *bearer_itm;
struct rmnet_flow_map *flow_itm;
int rc = 0, qlen;
bool enable;
struct rmnet_bearer_map *bearer;
enable = fc_info->num_bytes > 0 ? 1 : 0;
list_for_each_entry(bearer, &qos->bearer_head, list) {
bearer->grant_size = fc_info->num_bytes;
bearer->grant_thresh =
qmi_rmnet_grant_per(bearer->grant_size);
bearer->seq = fc_info->seq_num;
bearer->ack_req = ack_req;
bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
bearer->last_grant = fc_info->num_bytes;
bearer->last_seq = fc_info->seq_num;
list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
bearer_itm->grant_size = fc_info->num_bytes;
bearer_itm->grant_thresh =
qmi_rmnet_grant_per(bearer_itm->grant_size);
bearer_itm->seq = fc_info->seq_num;
bearer_itm->ack_req = ack_req;
bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
bearer_itm->last_grant = fc_info->num_bytes;
bearer_itm->last_seq = fc_info->seq_num;
dfc_bearer_flow_ctl(dev, bearer, qos);
}
list_for_each_entry(flow_itm, &qos->flow_head, list) {
qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
enable);
trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
flow_itm->flow_id, fc_info->num_bytes,
qlen, flow_itm->tcm_handle, enable);
rc++;
}
if (enable == 0 && ack_req)
dfc_send_ack(dev, fc_info->bearer_id,
fc_info->seq_num, fc_info->mux_id,
DFC_ACK_TYPE_DISABLE);
return rc;
return 0;
}
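/* Descriptive note on the rework above (not part of the patch): flow
 * control is now applied once per bearer through bearer->mq_idx (plus
 * the dedicated TCP-ACK queue, unless tcp_bidir keeps it open) instead
 * of walking every flow bound to the bearer, and both helpers now
 * return 0 unconditionally since the per-flow count is gone.
 */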
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -1075,6 +969,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
bool action = false;
itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
if (!itm)
itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);
if (itm) {
/* The RAT switch flag indicates the start and end of
* the switch. Ignore indications in between.
@@ -1093,6 +990,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
(itm->grant_size > 0 && fc_info->num_bytes == 0))
action = true;
/* This is needed by qmap */
if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
dfc_qmap_send_ack(qos, itm->bearer_id,
itm->seq, DFC_ACK_TYPE_DISABLE);
itm->grant_size = fc_info->num_bytes;
itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
itm->seq = fc_info->seq_num;
@@ -1103,17 +1005,14 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
if (action)
rc = dfc_bearer_flow_ctl(dev, itm, qos);
} else {
pr_debug("grant %u before flow activate", fc_info->num_bytes);
qos->default_grant = fc_info->num_bytes;
}
return rc;
}
static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
struct dfc_svc_ind *svc_ind)
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
struct dfc_flow_status_ind_msg_v01 *ind)
{
struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
struct net_device *dev;
struct qos_info *qos;
struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1187,13 +1086,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
if (!itm)
return;
/* If no change in tx status, ignore */
if (itm->tx_off == !tx_status)
return;
if (itm->grant_size && !tx_status) {
itm->grant_size = 0;
itm->tcp_bidir = false;
dfc_bearer_flow_ctl(dev, itm, qos);
} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
itm->grant_size = DEFAULT_GRANT;
itm->grant_thresh = DEFAULT_GRANT;
itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
itm->seq = 0;
itm->ack_req = 0;
dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1202,10 +1105,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
itm->tx_off = !tx_status;
}
static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
struct dfc_svc_ind *svc_ind)
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
struct dfc_tx_link_status_ind_msg_v01 *ind)
{
struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
struct net_device *dev;
struct qos_info *qos;
struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1267,10 +1169,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)
if (!dfc->restart_state) {
if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
dfc_do_burst_flow_control(dfc, svc_ind);
dfc_do_burst_flow_control(
dfc, &svc_ind->d.dfc_info);
else if (svc_ind->msg_id ==
QMI_DFC_TX_LINK_STATUS_IND_V01)
dfc_handle_tx_link_status_ind(dfc, svc_ind);
dfc_handle_tx_link_status_ind(
dfc, &svc_ind->d.tx_status);
}
kfree(svc_ind);
} while (1);
@@ -1518,22 +1422,28 @@ void dfc_qmi_client_exit(void *dfc_data)
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len)
{
struct rmnet_bearer_map *bearer;
struct rmnet_bearer_map *bearer = NULL;
struct rmnet_flow_map *itm;
u32 start_grant;
spin_lock_bh(&qos->qos_lock);
if (dfc_mode == DFC_MODE_MQ_NUM) {
/* Mark is mq num */
if (likely(mark < MAX_MQ_NUM))
bearer = qos->mq[mark].bearer;
} else {
/* Mark is flow_id */
itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
if (unlikely(!itm))
goto out;
if (likely(itm))
bearer = itm->bearer;
}
bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
if (unlikely(!bearer))
goto out;
trace_dfc_flow_check(dev->name, bearer->bearer_id,
len, bearer->grant_size);
len, mark, bearer->grant_size);
if (!bearer->grant_size)
goto out;
@@ -1588,7 +1498,7 @@ void dfc_qmi_query_flow(void *dfc_data)
svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
sizeof(resp->flow_status[0]) * resp->flow_status_len);
dfc_do_burst_flow_control(data, svc_ind);
dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);
done:
kfree(svc_ind);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,40 +15,56 @@
#include "qmi_rmnet_i.h"
#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
#include "dfc.h"
#include <linux/rtnetlink.h>
#include <uapi/linux/rtnetlink.h>
#include <net/pkt_sched.h>
#define CREATE_TRACE_POINTS
#include "dfc.h"
#undef CREATE_TRACE_POINTS
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/alarmtimer.h>
#define NLMSG_FLOW_ACTIVATE 1
#define NLMSG_FLOW_DEACTIVATE 2
#define NLMSG_CLIENT_SETUP 4
#define NLMSG_CLIENT_DELETE 5
#define NLMSG_SCALE_FACTOR 6
#define NLMSG_WQ_FREQUENCY 7
#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define DFC_MODE_MULTIQ 2
#define FLAG_QMAP_MASK 0x0020
#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
#define DFC_SUPPORTED_MODE(m) \
((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM || \
(m) == DFC_MODE_SA)
#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
int dfc_mode;
int dfc_qmap;
#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
unsigned int rmnet_wq_frequency __read_mostly = 1000;
module_param(rmnet_wq_frequency, uint, 0644);
MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check in ms");
#define PS_WORK_ACTIVE_BIT 0
#define PS_INTERVAL (((!rmnet_wq_frequency) ? \
1 : rmnet_wq_frequency/10) * (HZ/100))
#define NO_DELAY (0x0000 * HZ)
#define PS_INTERVAL_KT (ms_to_ktime(1000))
#ifdef CONFIG_QTI_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
static LIST_HEAD(qos_cleanup_list);
#endif
static int
qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
struct qmi_info *qmi);
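
The FLAG_* masks above decode the client-setup word that arrives in tcm->tcm_ifindex (see FLAG_TO_MODE()/FLAG_TO_QMAP() in the NLMSG_CLIENT_SETUP hunk below). A minimal sketch of how a sender would compose that word; encode_client_setup() is a hypothetical helper, not part of this patch:

#include <stdint.h>

static inline uint32_t encode_client_setup(uint32_t mode, int powersave, int qmap)
{
	uint32_t word = mode & 0x000F;	/* FLAG_DFC_MASK: mode 2, 3 or 4 */

	if (powersave)
		word |= 0x0010;		/* FLAG_POWERSAVE_MASK */
	if (qmap)
		word |= 0x0020;		/* FLAG_QMAP_MASK */

	return word;
}

With the default rmnet_wq_frequency of 1000, PS_INTERVAL above works out to (1000/10) * (HZ/100), roughly one second of jiffies, which matches the one-second PS_INTERVAL_KT used for the alarm path.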
struct qmi_elem_info data_ep_id_type_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
@@ -86,7 +102,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
int i;
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
if (!qmi)
return NULL;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -124,8 +140,7 @@ qmi_rmnet_has_pending(struct qmi_info *qmi)
#ifdef CONFIG_QTI_QMI_DFC
static void
qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
struct qos_info *qos)
qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
struct rmnet_bearer_map *bearer, *br_tmp;
struct rmnet_flow_map *itm, *fl_tmp;
@@ -141,6 +156,8 @@ qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
list_del(&bearer->list);
kfree(bearer);
}
memset(qos->mq, 0, sizeof(qos->mq));
}
struct rmnet_flow_map *
@@ -179,17 +196,17 @@ static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
itm->bearer_id = new_map->bearer_id;
itm->flow_id = new_map->flow_id;
itm->ip_type = new_map->ip_type;
itm->tcm_handle = new_map->tcm_handle;
itm->mq_idx = new_map->mq_idx;
}
int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
{
struct netdev_queue *q;
if (unlikely(tcm_handle >= dev->num_tx_queues))
if (unlikely(mq_idx >= dev->num_tx_queues))
return 0;
q = netdev_get_tx_queue(dev, tcm_handle);
q = netdev_get_tx_queue(dev, mq_idx);
if (unlikely(!q))
return 0;
@@ -198,6 +215,8 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
else
netif_tx_stop_queue(q);
trace_dfc_qmi_tc(dev->name, mq_idx, enable);
return 0;
}
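
qmi_rmnet_flow_control() now takes the mq index directly and traces the new txq-based dfc_qmi_tc event. A typical call, quoted from the __qmi_rmnet_update_mq() hunk below, wakes or stops the queue according to the bearer's current grant:

qmi_rmnet_flow_control(dev, itm->mq_idx, bearer->grant_size > 0 ? 1 : 0);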
@@ -216,37 +235,169 @@ static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
}
}
static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
struct qos_info *qos_info, u8 bearer_id)
{
struct rmnet_bearer_map *bearer;
bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
if (bearer) {
bearer->flow_ref++;
} else {
bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
if (!bearer)
return NULL;
bearer->bearer_id = bearer_id;
bearer->flow_ref = 1;
bearer->grant_size = DEFAULT_CALL_GRANT;
bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
bearer->mq_idx = INVALID_MQ;
bearer->ack_mq_idx = INVALID_MQ;
list_add(&bearer->list, &qos_info->bearer_head);
}
return bearer;
}
static void __qmi_rmnet_bearer_put(struct net_device *dev,
struct qos_info *qos_info,
struct rmnet_bearer_map *bearer,
bool reset)
{
struct mq_map *mq;
int i, j;
if (bearer && --bearer->flow_ref == 0) {
for (i = 0; i < MAX_MQ_NUM; i++) {
mq = &qos_info->mq[i];
if (mq->bearer != bearer)
continue;
mq->bearer = NULL;
if (reset) {
qmi_rmnet_reset_txq(dev, i);
qmi_rmnet_flow_control(dev, i, 1);
if (dfc_mode == DFC_MODE_SA) {
j = i + ACK_MQ_OFFSET;
qmi_rmnet_reset_txq(dev, j);
qmi_rmnet_flow_control(dev, j, 1);
}
}
}
/* Remove from bearer map */
list_del(&bearer->list);
kfree(bearer);
}
}
static void __qmi_rmnet_update_mq(struct net_device *dev,
struct qos_info *qos_info,
struct rmnet_bearer_map *bearer,
struct rmnet_flow_map *itm)
{
struct mq_map *mq;
/* In SA mode default mq is not associated with any bearer */
if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
return;
mq = &qos_info->mq[itm->mq_idx];
if (!mq->bearer) {
mq->bearer = bearer;
if (dfc_mode == DFC_MODE_SA) {
bearer->mq_idx = itm->mq_idx;
bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
} else {
if (IS_ANCILLARY(itm->ip_type))
bearer->ack_mq_idx = itm->mq_idx;
else
bearer->mq_idx = itm->mq_idx;
}
qmi_rmnet_flow_control(dev, itm->mq_idx,
bearer->grant_size > 0 ? 1 : 0);
if (dfc_mode == DFC_MODE_SA)
qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
bearer->grant_size > 0 ? 1 : 0);
}
}
static int __qmi_rmnet_rebind_flow(struct net_device *dev,
struct qos_info *qos_info,
struct rmnet_flow_map *itm,
struct rmnet_flow_map *new_map)
{
struct rmnet_bearer_map *bearer;
__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);
bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
if (!bearer)
return -ENOMEM;
qmi_rmnet_update_flow_map(itm, new_map);
itm->bearer = bearer;
__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
return 0;
}
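/* Summary of the new bearer lifecycle above (descriptive, not part of
 * the patch): __qmi_rmnet_bearer_get() takes a reference on an existing
 * bearer or allocates one with flow_ref = 1, DEFAULT_CALL_GRANT and
 * invalid mq indices; __qmi_rmnet_update_mq() binds the flow's mq slot
 * (and, in SA mode, the paired ACK queue at mq_idx + ACK_MQ_OFFSET) to
 * the bearer and applies initial flow control from the current grant;
 * __qmi_rmnet_bearer_put() on the final reference clears every mq slot
 * owned by the bearer, optionally purging and re-enabling those queues,
 * then unlinks and frees it. __qmi_rmnet_rebind_flow() is the put/get
 * pair used when a flow moves to a different bearer.
 */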
static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
struct qmi_info *qmi)
{
struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
struct rmnet_flow_map new_map, *itm;
struct rmnet_bearer_map *bearer;
struct tcmsg tmp_tcm;
int rc = 0;
if (!qos_info)
if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
return -EINVAL;
ASSERT_RTNL();
/* flow activate
* tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
* tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle
* tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
*/
new_map.bearer_id = tcm->tcm__pad1;
new_map.flow_id = tcm->tcm_parent;
new_map.ip_type = tcm->tcm_ifindex;
new_map.tcm_handle = tcm->tcm_handle;
new_map.mq_idx = tcm->tcm_handle;
trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
new_map.ip_type, new_map.tcm_handle, 1);
new_map.ip_type, new_map.mq_idx, 1);
again:
spin_lock_bh(&qos_info->qos_lock);
itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
new_map.ip_type);
if (itm) {
qmi_rmnet_update_flow_map(itm, &new_map);
if (itm->bearer_id != new_map.bearer_id) {
rc = __qmi_rmnet_rebind_flow(
dev, qos_info, itm, &new_map);
goto done;
} else if (itm->mq_idx != new_map.mq_idx) {
tmp_tcm.tcm__pad1 = itm->bearer_id;
tmp_tcm.tcm_parent = itm->flow_id;
tmp_tcm.tcm_ifindex = itm->ip_type;
tmp_tcm.tcm_handle = itm->mq_idx;
spin_unlock_bh(&qos_info->qos_lock);
qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
goto again;
} else {
goto done;
}
}
/* Create flow map */
itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
if (!itm) {
spin_unlock_bh(&qos_info->qos_lock);
@@ -256,36 +407,20 @@ static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
qmi_rmnet_update_flow_map(itm, &new_map);
list_add(&itm->list, &qos_info->flow_head);
bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
if (bearer) {
bearer->flow_ref++;
} else {
bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
/* Create or update bearer map */
bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
if (!bearer) {
rc = -ENOMEM;
goto done;
}
itm->bearer = bearer;
__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
done:
spin_unlock_bh(&qos_info->qos_lock);
return -ENOMEM;
}
bearer->bearer_id = new_map.bearer_id;
bearer->flow_ref = 1;
bearer->grant_size = qos_info->default_grant;
bearer->grant_thresh =
qmi_rmnet_grant_per(bearer->grant_size);
qos_info->default_grant = DEFAULT_GRANT;
list_add(&bearer->list, &qos_info->bearer_head);
}
qmi_rmnet_flow_control(dev, itm->tcm_handle,
bearer->grant_size > 0 ? 1 : 0);
trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
bearer->grant_size, 0, itm->tcm_handle,
bearer->grant_size > 0 ? 1 : 0);
}
spin_unlock_bh(&qos_info->qos_lock);
return 0;
return rc;
}
static int
@@ -294,7 +429,6 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
{
struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
struct rmnet_flow_map new_map, *itm;
struct rmnet_bearer_map *bearer;
if (!qos_info)
return -EINVAL;
@@ -316,35 +450,19 @@ qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
if (itm) {
trace_dfc_flow_info(dev->name, new_map.bearer_id,
new_map.flow_id, new_map.ip_type,
itm->tcm_handle, 0);
itm->mq_idx, 0);
__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
/* Remove from flow map */
list_del(&itm->list);
/*clear bearer map*/
bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
if (bearer && --bearer->flow_ref == 0) {
list_del(&bearer->list);
kfree(bearer);
/* Purge pending packets for dedicated flow */
if (itm->flow_id)
qmi_rmnet_reset_txq(dev, itm->tcm_handle);
}
/* Enable flow to allow new flow setup */
qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
0, 0, itm->tcm_handle, 1);
kfree(itm);
}
if (list_empty(&qos_info->flow_head)) {
if (list_empty(&qos_info->flow_head))
netif_tx_wake_all_queues(dev);
trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
}
spin_unlock_bh(&qos_info->qos_lock);
return 0;
}
@@ -353,42 +471,24 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
int i;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
if (qmi->dfc_clients[i])
if (qmi->dfc_clients[i] && !dfc_qmap)
dfc_qmi_query_flow(qmi->dfc_clients[i]);
}
}
static int qmi_rmnet_set_scale_factor(const char *val,
const struct kernel_param *kp)
struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
u8 bearer_id)
{
int ret;
unsigned int num = 0;
struct rmnet_bearer_map *bearer;
ret = kstrtouint(val, 10, &num);
if (ret != 0 || num == 0)
return -EINVAL;
bearer = __qmi_rmnet_bearer_get(qos_info, bearer_id);
if (bearer)
bearer->flow_ref--;
return param_set_uint(val, kp);
return bearer;
}
static const struct kernel_param_ops qmi_rmnet_scale_ops = {
.set = qmi_rmnet_set_scale_factor,
.get = param_get_uint,
};
module_param_cb(qmi_rmnet_scale_factor, &qmi_rmnet_scale_ops,
&qmi_rmnet_scale_factor, 0664);
#else
static inline void
qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
struct rmnet_flow_map *itm, int add_flow)
{
}
static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
}
static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
struct rmnet_flow_map *new_map)
@@ -417,7 +517,7 @@ static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
int idx, rc, err = 0;
int idx, err = 0;
struct svc_info svc;
ASSERT_RTNL();
@@ -441,18 +541,17 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
svc.ep_type = tcm->tcm_info;
svc.iface_id = tcm->tcm_parent;
if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
if (DFC_SUPPORTED_MODE(dfc_mode) &&
!qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
rc = dfc_qmi_client_init(port, idx, &svc, qmi);
if (rc < 0)
err = rc;
if (dfc_qmap)
err = dfc_qmap_client_init(port, idx, &svc, qmi);
else
err = dfc_qmi_client_init(port, idx, &svc, qmi);
}
if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
(idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
rc = wda_qmi_client_init(port, &svc, qmi);
if (rc < 0)
err = rc;
err = wda_qmi_client_init(port, &svc, qmi);
}
return err;
@@ -471,6 +570,9 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
data = qmi->dfc_pending[idx];
if (data) {
if (dfc_qmap)
dfc_qmap_client_exit(data);
else
dfc_qmi_client_exit(data);
qmi->dfc_clients[idx] = NULL;
qmi->dfc_pending[idx] = NULL;
@@ -518,20 +620,23 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
switch (tcm->tcm_family) {
case NLMSG_FLOW_ACTIVATE:
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
!qmi_rmnet_has_dfc_client(qmi))
return;
qmi_rmnet_add_flow(dev, tcm, qmi);
break;
case NLMSG_FLOW_DEACTIVATE:
if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
return;
qmi_rmnet_del_flow(dev, tcm, qmi);
break;
case NLMSG_CLIENT_SETUP:
if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
if (!DFC_SUPPORTED_MODE(dfc_mode) &&
!(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
return;
@@ -558,6 +663,14 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
}
qmi_rmnet_delete_client(port, qmi, tcm);
break;
case NLMSG_SCALE_FACTOR:
if (!tcm->tcm_ifindex)
return;
qmi_rmnet_scale_factor = tcm->tcm_ifindex;
break;
case NLMSG_WQ_FREQUENCY:
rmnet_wq_frequency = tcm->tcm_ifindex;
break;
default:
pr_debug("%s(): No handler\n", __func__);
break;
@@ -613,7 +726,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
continue;
do_wake = !bearer->grant_size;
bearer->grant_size = DEFAULT_GRANT;
bearer->grant_thresh = DEFAULT_GRANT;
bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
bearer->seq = 0;
bearer->ack_req = 0;
bearer->tcp_bidir = false;
@@ -665,43 +778,93 @@ void qmi_rmnet_burst_fc_check(struct net_device *dev,
}
EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
static bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
{
unsigned int len = skb->len;
switch (skb->protocol) {
/* TCPv4 ACKs */
case htons(ETH_P_IP):
if ((ip_hdr(skb)->protocol == IPPROTO_TCP) &&
(ip_hdr(skb)->ihl == 5) &&
(len == 40 || len == 52) &&
((tcp_flag_word(tcp_hdr(skb)) &
cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
return true;
break;
/* TCPv6 ACKs */
case htons(ETH_P_IPV6):
if ((ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
(len == 60 || len == 72) &&
((tcp_flag_word(tcp_hdr(skb)) &
cpu_to_be32(0x00FF0000)) == TCP_FLAG_ACK))
return true;
break;
}
return false;
}
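
The magic lengths in qmi_rmnet_is_tcp_ack() decompose as follows, assuming no IPv4 options (ihl == 5) and no IPv6 extension headers:

/* IPv4: 20 (IP) + 20 (TCP) = 40, or 20 + 32 (TCP + timestamps) = 52
 * IPv6: 40 (fixed header) + 20 (TCP) = 60, or 40 + 32 = 72
 * so a bare ACK carrying no payload matches one of these four sizes.
 */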
static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
{
struct rmnet_flow_map *itm;
int ip_type;
int txq = DEFAULT_MQ_NUM;
/* Put RS/NS in default mq */
if (skb->protocol == htons(ETH_P_IPV6) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
(icmp6_hdr(skb)->icmp6_type == 133 ||
icmp6_hdr(skb)->icmp6_type == 135)) {
return DEFAULT_MQ_NUM;
}
ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
spin_lock_bh(&qos->qos_lock);
itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
if (unlikely(!itm))
goto done;
/* Put the packet in the assigned mq except TCP ack */
if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
txq = itm->bearer->ack_mq_idx;
else
txq = itm->mq_idx;
done:
spin_unlock_bh(&qos->qos_lock);
return txq;
}
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
{
struct qos_info *qos = rmnet_get_qos_pt(dev);
int txq = 0, ip_type = AF_INET;
unsigned int len = skb->len;
struct rmnet_flow_map *itm;
u32 mark = skb->mark;
if (!qos)
return 0;
switch (skb->protocol) {
/* TCPv4 ACKs */
case htons(ETH_P_IP):
ip_type = AF_INET;
if ((!mark) &&
(ip_hdr(skb)->protocol == IPPROTO_TCP) &&
(len == 40 || len == 52) &&
(ip_hdr(skb)->ihl == 5) &&
((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
return 1;
break;
/* If mark is mq num return it */
if (dfc_mode == DFC_MODE_MQ_NUM)
return mark;
/* TCPv6 ACKs */
case htons(ETH_P_IPV6):
ip_type = AF_INET6;
if ((!mark) &&
(ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
(len == 60 || len == 72) &&
((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
return 1;
/* Fall through */
}
if (dfc_mode == DFC_MODE_SA)
return qmi_rmnet_get_queue_sa(qos, skb);
/* Default flows */
if (!mark)
if (!mark) {
if (qmi_rmnet_is_tcp_ack(skb))
return 1;
else
return 0;
}
ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
/* Dedicated flows */
spin_lock_bh(&qos->qos_lock);
@@ -710,7 +873,7 @@ int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
if (unlikely(!itm))
goto done;
txq = itm->tcm_handle;
txq = itm->mq_idx;
done:
spin_unlock_bh(&qos->qos_lock);
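/* Queue selection after this patch, summarized (descriptive note):
 * DFC_MODE_MQ_NUM: skb->mark is already the mq index and is returned
 * as-is; DFC_MODE_SA: qmi_rmnet_get_queue_sa() sends ICMPv6 RS/NS and
 * unmapped flows to DEFAULT_MQ_NUM, TCP ACKs to the bearer's
 * ack_mq_idx, and everything else to the flow's mq_idx; other modes:
 * unmarked bare TCP ACKs go to queue 1, other unmarked traffic to
 * queue 0, and marked flows to itm->mq_idx (queue 0 if no mapping
 * exists).
 */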
@@ -728,13 +891,12 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
{
struct qos_info *qos;
qos = kmalloc(sizeof(*qos), GFP_KERNEL);
qos = kzalloc(sizeof(*qos), GFP_KERNEL);
if (!qos)
return NULL;
qos->mux_id = mux_id;
qos->real_dev = real_dev;
qos->default_grant = DEFAULT_GRANT;
qos->tran_num = 0;
INIT_LIST_HEAD(&qos->flow_head);
INIT_LIST_HEAD(&qos->bearer_head);
@@ -744,28 +906,38 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
void qmi_rmnet_qos_exit_pre(void *qos)
{
void *port = rmnet_get_rmnet_port(dev);
struct qmi_info *qmi = rmnet_get_qmi_pt(port);
struct qos_info *qos_info = (struct qos_info *)qos;
if (!qmi || !qos)
if (!qos)
return;
qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
kfree(qos);
list_add(&((struct qos_info *)qos)->list, &qos_cleanup_list);
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit);
EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);
void qmi_rmnet_qos_exit_post(void)
{
struct qos_info *qos, *tmp;
synchronize_rcu();
list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
list_del(&qos->list);
qmi_rmnet_clean_flow_list(qos);
kfree(qos);
}
}
EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
#endif
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static LIST_HEAD(ps_list);
struct rmnet_powersave_work {
struct delayed_work work;
struct alarm atimer;
void *port;
u64 old_rx_pkts;
u64 old_tx_pkts;
@@ -775,7 +947,7 @@ void qmi_rmnet_ps_on_notify(void *port)
{
struct qmi_rmnet_ps_ind *tmp;
list_for_each_entry(tmp, &ps_list, list)
list_for_each_entry_rcu(tmp, &ps_list, list)
tmp->ps_on_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
@@ -784,7 +956,7 @@ void qmi_rmnet_ps_off_notify(void *port)
{
struct qmi_rmnet_ps_ind *tmp;
list_for_each_entry(tmp, &ps_list, list)
list_for_each_entry_rcu(tmp, &ps_list, list)
tmp->ps_off_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
@@ -811,7 +983,7 @@ int qmi_rmnet_ps_ind_deregister(void *port,
if (!port || !ps_ind)
return -EINVAL;
list_for_each_entry(tmp, &ps_list, list) {
list_for_each_entry_rcu(tmp, &ps_list, list) {
if (tmp == ps_ind) {
list_del_rcu(&ps_ind->list);
goto done;
@@ -844,9 +1016,20 @@ EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
static void qmi_rmnet_work_restart(void *port)
{
if (!rmnet_ps_wq || !rmnet_work)
return;
rcu_read_lock();
if (!rmnet_work_quit)
queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
rcu_read_unlock();
}
static enum alarmtimer_restart qmi_rmnet_work_alarm(struct alarm *atimer,
ktime_t now)
{
struct rmnet_powersave_work *real_work;
real_work = container_of(atimer, struct rmnet_powersave_work, atimer);
qmi_rmnet_work_restart(real_work->port);
return ALARMTIMER_NORESTART;
}
static void qmi_rmnet_check_stats(struct work_struct *work)
@@ -856,6 +1039,7 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
u64 rxd, txd;
u64 rx, tx;
bool dl_msg_active;
bool use_alarm_timer = true;
real_work = container_of(to_delayed_work(work),
struct rmnet_powersave_work, work);
@@ -873,13 +1057,9 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
qmi->ps_ignore_grant = false;
/* Register to get QMI DFC and DL marker */
if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
/* If this failed need to retry quickly */
queue_delayed_work(rmnet_ps_wq,
&real_work->work, HZ / 50);
return;
if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0)
goto end;
}
qmi->ps_enabled = false;
/* Do a query when coming out of powersave */
@@ -905,15 +1085,15 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
* (likely in RLF), no need to enter powersave
*/
if (!dl_msg_active &&
!rmnet_all_flows_enabled(real_work->port))
!rmnet_all_flows_enabled(real_work->port)) {
use_alarm_timer = false;
goto end;
}
/* Deregister to suppress QMI DFC and DL marker */
if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
queue_delayed_work(rmnet_ps_wq,
&real_work->work, PS_INTERVAL);
return;
}
if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0)
goto end;
qmi->ps_enabled = true;
/* Ignore grant after going into powersave */
@@ -931,7 +1111,16 @@ static void qmi_rmnet_check_stats(struct work_struct *work)
return;
}
end:
queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
rcu_read_lock();
if (!rmnet_work_quit) {
if (use_alarm_timer)
alarm_start_relative(&real_work->atimer,
PS_INTERVAL_KT);
else
queue_delayed_work(rmnet_ps_wq, &real_work->work,
PS_INTERVAL);
}
rcu_read_unlock();
}
static void qmi_rmnet_work_set_active(void *port, int status)
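/* Note on the rescheduling above (interpretation, not from the patch):
 * re-arming through the ALARM_BOOTTIME alarm lets the stats check fire
 * even if the system suspends, while the no-DL-activity case with
 * disabled flows (likely RLF) falls back to deferrable delayed work,
 * presumably so the powersave check itself does not wake the device.
 */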
@@ -956,17 +1145,22 @@ void qmi_rmnet_work_init(void *port)
rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC);
if (!rmnet_ps_wq)
return;
rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
if (!rmnet_work) {
destroy_workqueue(rmnet_ps_wq);
rmnet_ps_wq = NULL;
return;
}
INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
rmnet_work->port = port;
rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
&rmnet_work->old_tx_pkts);
rmnet_work_quit = false;
qmi_rmnet_work_set_active(rmnet_work->port, 1);
queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
}
@@ -989,6 +1183,11 @@ void qmi_rmnet_work_exit(void *port)
{
if (!rmnet_ps_wq || !rmnet_work)
return;
rmnet_work_quit = true;
synchronize_rcu();
alarm_cancel(&rmnet_work->atimer);
cancel_delayed_work_sync(&rmnet_work->work);
destroy_workqueue(rmnet_ps_wq);
qmi_rmnet_work_set_active(port, 0);
@@ -1027,6 +1226,4 @@ bool qmi_rmnet_ignore_grant(void *port)
return qmi->ps_ignore_grant;
}
EXPORT_SYMBOL(qmi_rmnet_ignore_grant);
#endif
MODULE_LICENSE("GPL v2");


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,8 +21,8 @@
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
struct qmi_rmnet_ps_ind {
void (*ps_on_handler)(void *);
void (*ps_off_handler)(void *);
void (*ps_on_handler)(void *port);
void (*ps_off_handler)(void *port);
struct list_head list;
};
@@ -56,7 +56,8 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
#ifdef CONFIG_QTI_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
void qmi_rmnet_qos_exit_pre(void *qos);
void qmi_rmnet_qos_exit_post(void);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -67,7 +68,11 @@ qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
return NULL;
}
static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
static inline void qmi_rmnet_qos_exit_pre(void *qos)
{
}
static inline void qmi_rmnet_qos_exit_post(void)
{
}
@@ -122,23 +127,23 @@ static inline bool qmi_rmnet_ignore_grant(void *port)
return false;
}
static inline int qmi_rmnet_ps_ind_register(void *port,
static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
struct qmi_rmnet_ps_ind *ps_ind)
{
return 0;
}
static inline int qmi_rmnet_ps_ind_deregister(void *port,
static inline int qmi_rmnet_ps_ind_deregister(struct rmnet_port *port,
struct qmi_rmnet_ps_ind *ps_ind)
{
return 0;
}
static inline void qmi_rmnet_ps_off_notify(void *port)
static inline void qmi_rmnet_ps_off_notify(struct rmnet_port *port)
{
}
static inline void qmi_rmnet_ps_on_notify(void *port)
static inline void qmi_rmnet_ps_on_notify(struct rmnet_port *port)
{
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,25 +17,26 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define IP_VER_4 4
#define IP_VER_6 6
#define MAX_MQ_NUM 16
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
#define DEFAULT_GRANT 1
#define DEFAULT_CALL_GRANT 20480
#define DFC_MAX_BEARERS_V01 16
#define DEFAULT_MQ_NUM 0
#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
#define INVALID_MQ 0xFF
#define DFC_MODE_FLOW_ID 2
#define DFC_MODE_MQ_NUM 3
#define DFC_MODE_SA 4
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
struct rmnet_flow_map {
struct list_head list;
u8 bearer_id;
u32 flow_id;
int ip_type;
u32 tcm_handle;
};
extern int dfc_mode;
extern int dfc_qmap;
struct rmnet_bearer_map {
struct list_head list;
@@ -50,6 +51,18 @@ struct rmnet_bearer_map {
bool tcp_bidir;
bool rat_switch;
bool tx_off;
u32 ack_txid;
u32 mq_idx;
u32 ack_mq_idx;
};
struct rmnet_flow_map {
struct list_head list;
u8 bearer_id;
u32 flow_id;
int ip_type;
u32 mq_idx;
struct rmnet_bearer_map *bearer;
};
struct svc_info {
@@ -58,21 +71,21 @@ struct svc_info {
u32 iface_id;
};
struct mq_map {
struct rmnet_bearer_map *bearer;
};
struct qos_info {
struct list_head list;
u8 mux_id;
struct net_device *real_dev;
struct list_head flow_head;
struct list_head bearer_head;
u32 default_grant;
struct mq_map mq[MAX_MQ_NUM];
u32 tran_num;
spinlock_t qos_lock;
};
struct flow_info {
struct net_device *dev;
struct rmnet_flow_map *itm;
};
struct qmi_info {
int flag;
void *wda_client;
@@ -123,13 +136,23 @@ void dfc_qmi_client_exit(void *dfc_data);
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable);
void dfc_qmi_query_flow(void *dfc_data);
int dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
struct qos_info *qos);
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi);
void dfc_qmap_client_exit(void *dfc_data);
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
u8 bearer_id);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -155,17 +178,6 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
{
}
static inline void
dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len)
{
}
static inline void
dfc_qmi_query_flow(void *dfc_data)
{
}
static inline int
dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
@@ -173,6 +185,17 @@ dfc_bearer_flow_ctl(struct net_device *dev,
{
return 0;
}
static inline int
dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi)
{
return -EINVAL;
}
static inline void dfc_qmap_client_exit(void *dfc_data)
{
}
#endif
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
#include "rmnet_genl.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#define CONFIG_QTI_QMI_RMNET 1
@@ -90,6 +91,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
if (port->nr_rmnet_devs)
return -EINVAL;
netdev_rx_handler_unregister(real_dev);
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
@@ -97,8 +100,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
netdev_rx_handler_unregister(real_dev);
/* release reference on real_dev */
dev_put(real_dev);
@@ -173,11 +174,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u32 data_format;
int err = 0;
u16 mux_id;
@@ -212,10 +213,9 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
flags = nla_data(data[IFLA_RMNET_FLAGS]);
data_format = flags->flags & flags->mask;
}
netdev_dbg(dev, "data format [0x%08X]\n", data_format);
port->data_format = data_format;
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
void *agg_params;
@@ -266,9 +266,11 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
if (!port->nr_rmnet_devs)
qmi_rmnet_qmi_exit(port->qmi_info, port);
rmnet_unregister_real_device(real_dev, port);
unregister_netdevice(dev);
unregister_netdevice_queue(dev, head);
qmi_rmnet_qos_exit_post();
rmnet_unregister_real_device(real_dev, port);
}
static void rmnet_force_unassociate_device(struct net_device *dev)
@@ -279,6 +281,7 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
HLIST_HEAD(cleanup_list);
if (!rmnet_is_real_dev_registered(real_dev))
return;
@@ -295,12 +298,23 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
hlist_add_head(&ep->hlnode, &cleanup_list);
}
synchronize_rcu();
hlist_for_each_entry_safe(ep, tmp_ep, &cleanup_list, hlnode) {
hlist_del(&ep->hlnode);
kfree(ep);
}
/* Unregistering devices in context before freeing port.
* If this API becomes non-context their order should switch.
*/
unregister_netdevice_many(&list);
qmi_rmnet_qos_exit_post();
rmnet_unregister_real_device(real_dev, port);
}
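
This reordering, together with qmi_rmnet_qos_exit_pre()/qmi_rmnet_qos_exit_post() above, is the "Reduce synchronize_rcu calls" part of the commit message: endpoints are unlinked under RCU and parked on a local cleanup list, a single synchronize_rcu() covers the whole batch, and only then are the endpoint structs freed and the parked qos structs released by qmi_rmnet_qos_exit_post().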
@@ -400,14 +414,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
void *agg_params;
unsigned long irq_flags;
struct rmnet_egress_agg_params *agg_params;
agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
spin_lock_irqsave(&port->agg_lock, irq_flags);
memcpy(&port->egress_agg_params, agg_params,
sizeof(port->egress_agg_params));
spin_unlock_irqrestore(&port->agg_lock, irq_flags);
rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
agg_params->agg_count,
agg_params->agg_features,
agg_params->agg_time);
}
return 0;
@@ -564,9 +577,12 @@ EXPORT_SYMBOL(rmnet_get_qmi_pt);
void *rmnet_get_qos_pt(struct net_device *dev)
{
if (dev)
return rcu_dereference(
((struct rmnet_priv *)netdev_priv(dev))->qos_info);
struct rmnet_priv *priv;
if (dev) {
priv = netdev_priv(dev);
return rcu_dereference(priv->qos_info);
}
return NULL;
}
@@ -708,6 +724,26 @@ int rmnet_get_powersave_notif(void *port)
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);
struct net_device *rmnet_get_real_dev(void *port)
{
if (port)
return ((struct rmnet_port *)port)->dev;
return NULL;
}
EXPORT_SYMBOL(rmnet_get_real_dev);
int rmnet_get_dlmarker_info(void *port)
{
if (!port)
return 0;
return ((struct rmnet_port *)port)->data_format &
(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |
RMNET_INGRESS_FORMAT_DL_MARKER_V2);
}
EXPORT_SYMBOL(rmnet_get_dlmarker_info);
/* Startup/Shutdown */
static int __init rmnet_init(void)
@@ -724,6 +760,8 @@ static int __init rmnet_init(void)
unregister_netdevice_notifier(&rmnet_dev_notifier);
return rc;
}
rmnet_core_genl_init();
return rc;
}
@@ -731,6 +769,7 @@ static void __exit rmnet_exit(void)
{
unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
rmnet_core_genl_deinit();
}
module_init(rmnet_init)


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,11 @@ struct rmnet_endpoint {
struct hlist_node hlnode;
};
struct rmnet_agg_stats {
u64 ul_agg_reuse;
u64 ul_agg_alloc;
};
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
@@ -41,14 +46,22 @@ struct rmnet_port_priv_stats {
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
struct rmnet_agg_stats agg;
};
struct rmnet_egress_agg_params {
u16 agg_size;
u16 agg_count;
u8 agg_count;
u8 agg_features;
u32 agg_time;
};
struct rmnet_agg_page {
struct list_head list;
struct page *page;
};
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
@@ -73,12 +86,16 @@ struct rmnet_port {
struct timespec64 agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
u8 agg_size_order;
struct list_head agg_list;
struct rmnet_agg_page *agg_head;
void *qmi_info;
/* dl marker elements */
struct list_head dl_list;
struct rmnet_port_priv_stats stats;
int dl_marker_flush;
/* Descriptor pool */
spinlock_t desc_pool_lock;
@@ -137,6 +154,7 @@ struct rmnet_priv_stats {
u64 csum_sw;
u64 csum_hw;
struct rmnet_coal_stats coal;
u64 ul_prio;
};
struct rmnet_priv {

core/rmnet_ctl.h (new file, 66 lines)

@@ -0,0 +1,66 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* RMNET_CTL header
*
*/
#ifndef _RMNET_CTL_H_
#define _RMNET_CTL_H_
#include <linux/skbuff.h>
enum rmnet_ctl_log_lvl {
RMNET_CTL_LOG_CRIT,
RMNET_CTL_LOG_ERR,
RMNET_CTL_LOG_INFO,
RMNET_CTL_LOG_DEBUG,
};
#define rmnet_ctl_log_err(msg, rc, data, len) \
rmnet_ctl_log(RMNET_CTL_LOG_ERR, msg, rc, data, len)
#define rmnet_ctl_log_info(msg, data, len) \
rmnet_ctl_log(RMNET_CTL_LOG_INFO, msg, 0, data, len)
#define rmnet_ctl_log_debug(msg, data, len) \
rmnet_ctl_log(RMNET_CTL_LOG_DEBUG, msg, 0, data, len)
struct rmnet_ctl_client_hooks {
void (*ctl_dl_client_hook)(struct sk_buff *skb);
};
#ifdef CONFIG_RMNET_CTL
void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
int rmnet_ctl_unregister_client(void *handle);
int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
int rc, const void *data, unsigned int len);
#else
static inline void *rmnet_ctl_register_client(
struct rmnet_ctl_client_hooks *hook)
{
return NULL;
}
static inline int rmnet_ctl_unregister_client(void *handle)
{
return -EINVAL;
}
static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
{
return -EINVAL;
}
static inline void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
int rc, const void *data, unsigned int len)
{
}
#endif /* CONFIG_RMNET_CTL */
#endif /* _RMNET_CTL_H_ */
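
For context, a minimal client of the API above (all example_* names are hypothetical; within this patch the interface is consumed by the DFC QMAP code):

#include <linux/errno.h>
#include <linux/skbuff.h>
#include "rmnet_ctl.h"

static void example_dl_hook(struct sk_buff *skb)
{
	/* consume the downlink QMAP control frame */
	kfree_skb(skb);
}

static struct rmnet_ctl_client_hooks example_hooks = {
	.ctl_dl_client_hook = example_dl_hook,
};

static void *example_handle;

static int example_init(void)
{
	/* only one client is supported at a time */
	example_handle = rmnet_ctl_register_client(&example_hooks);
	return example_handle ? 0 : -EBUSY;
}

static void example_exit(void)
{
	rmnet_ctl_unregister_client(example_handle);
}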

core/rmnet_ctl_client.c (new file, 173 lines)

@@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* RMNET_CTL client handlers
*
*/
#include <linux/debugfs.h>
#include <linux/ipc_logging.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"
#define RMNET_CTL_LOG_PAGE 10
#define RMNET_CTL_LOG_NAME "rmnet_ctl"
#define RMNET_CTL_LOG_LVL "ipc_log_lvl"
struct rmnet_ctl_client {
struct rmnet_ctl_client_hooks hooks;
};
struct rmnet_ctl_endpoint {
struct rmnet_ctl_dev __rcu *dev;
struct rmnet_ctl_client __rcu *client;
struct dentry *dbgfs_dir;
struct dentry *dbgfs_loglvl;
void *ipc_log;
};
#ifdef CONFIG_RMNET_CTL_DEBUG
static u8 ipc_log_lvl = RMNET_CTL_LOG_DEBUG;
#else
static u8 ipc_log_lvl = RMNET_CTL_LOG_ERR;
#endif
static DEFINE_SPINLOCK(client_lock);
static struct rmnet_ctl_endpoint ctl_ep;
void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
{
rcu_assign_pointer(ctl_ep.dev, dev);
if (dev) {
ctl_ep.dbgfs_dir = debugfs_create_dir(
RMNET_CTL_LOG_NAME, NULL);
if (!IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
ctl_ep.dbgfs_loglvl = debugfs_create_u8(
RMNET_CTL_LOG_LVL, 0644, ctl_ep.dbgfs_dir,
&ipc_log_lvl);
if (!ctl_ep.ipc_log)
ctl_ep.ipc_log = ipc_log_context_create(
RMNET_CTL_LOG_PAGE, RMNET_CTL_LOG_NAME, 0);
} else {
debugfs_remove_recursive(ctl_ep.dbgfs_dir);
}
}
void rmnet_ctl_endpoint_post(const void *data, size_t len)
{
struct rmnet_ctl_client *client;
struct sk_buff *skb;
if (unlikely(!data || !len))
return;
rmnet_ctl_log_info("RX", data, len);
rcu_read_lock();
client = rcu_dereference(ctl_ep.client);
if (client && client->hooks.ctl_dl_client_hook) {
skb = alloc_skb(len, GFP_ATOMIC);
if (skb) {
skb_put_data(skb, data, len);
skb->protocol = htons(ETH_P_MAP);
client->hooks.ctl_dl_client_hook(skb);
}
}
rcu_read_unlock();
}
void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
{
struct rmnet_ctl_client *client;
if (!hook)
return NULL;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return NULL;
client->hooks = *hook;
spin_lock(&client_lock);
/* Only support one client for now */
if (rcu_dereference(ctl_ep.client)) {
spin_unlock(&client_lock);
kfree(client);
return NULL;
}
rcu_assign_pointer(ctl_ep.client, client);
spin_unlock(&client_lock);
return client;
}
EXPORT_SYMBOL(rmnet_ctl_register_client);
int rmnet_ctl_unregister_client(void *handle)
{
struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
spin_lock(&client_lock);
if (rcu_dereference(ctl_ep.client) != client) {
spin_unlock(&client_lock);
return -EINVAL;
}
RCU_INIT_POINTER(ctl_ep.client, NULL);
spin_unlock(&client_lock);
synchronize_rcu();
kfree(client);
return 0;
}
EXPORT_SYMBOL(rmnet_ctl_unregister_client);
int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
{
struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
struct rmnet_ctl_dev *dev;
int rc = -EINVAL;
if (client != rcu_dereference(ctl_ep.client))
return rc;
rmnet_ctl_log_info("TX", skb->data, skb->len);
rcu_read_lock();
dev = rcu_dereference(ctl_ep.dev);
if (dev && dev->xmit)
rc = dev->xmit(dev, skb);
rcu_read_unlock();
if (rc)
rmnet_ctl_log_err("TXE", rc, skb->data, skb->len);
return rc;
}
EXPORT_SYMBOL(rmnet_ctl_send_client);
void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
int rc, const void *data, unsigned int len)
{
if (lvl <= ipc_log_lvl && ctl_ep.ipc_log) {
if (data == NULL || len == 0)
ipc_log_string(ctl_ep.ipc_log, "%3s(%d): (null)\n",
msg, rc);
else
ipc_log_string(ctl_ep.ipc_log, "%3s(%d): %*ph\n",
msg, rc, len > 32 ? 32 : len, data);
}
}
EXPORT_SYMBOL(rmnet_ctl_log);

core/rmnet_ctl_client.h (new file, 29 lines)

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* RMNET_CTL client handlers
*
*/
#ifndef _RMNET_CTL_CLIENT_H_
#define _RMNET_CTL_CLIENT_H_
#include <linux/skbuff.h>
struct rmnet_ctl_stats {
u64 rx_pkts;
u64 rx_err;
u64 tx_pkts;
u64 tx_err;
u64 tx_complete;
};
struct rmnet_ctl_dev {
int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
struct rmnet_ctl_stats stats;
};
void rmnet_ctl_endpoint_post(const void *data, size_t len);
void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
#endif /* _RMNET_CTL_CLIENT_H_ */

core/rmnet_ctl_mhi.c (new file, 206 lines)

@@ -0,0 +1,206 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* RMNET_CTL mhi handler
*
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include "rmnet_ctl.h"
#include "rmnet_ctl_client.h"
#define RMNET_CTL_DEFAULT_MRU 256
struct rmnet_ctl_mhi_dev {
struct mhi_device *mhi_dev;
struct rmnet_ctl_dev dev;
u32 mru;
spinlock_t rx_lock; /* rx lock */
spinlock_t tx_lock; /* tx lock */
atomic_t in_reset;
};
static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
{
struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
dev, struct rmnet_ctl_mhi_dev, dev);
int rc;
spin_lock_bh(&ctl_dev->tx_lock);
rc = mhi_queue_transfer(ctl_dev->mhi_dev,
DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
if (rc)
dev->stats.tx_err++;
else
dev->stats.tx_pkts++;
spin_unlock_bh(&ctl_dev->tx_lock);
return rc;
}
static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
gfp_t gfp, void *free_buf)
{
struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
void *buf;
int no_tre, i, rc;
no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
if (!no_tre && free_buf) {
kfree(free_buf);
return;
}
for (i = 0; i < no_tre; i++) {
if (free_buf) {
buf = free_buf;
free_buf = NULL;
} else {
buf = kmalloc(ctl_dev->mru, gfp);
}
if (!buf)
return;
spin_lock_bh(&ctl_dev->rx_lock);
rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
buf, ctl_dev->mru, MHI_EOT);
spin_unlock_bh(&ctl_dev->rx_lock);
if (rc) {
kfree(buf);
return;
}
}
}
static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
if (mhi_res->transaction_status == -ENOTCONN) {
kfree(mhi_res->buf_addr);
return;
} else if (mhi_res->transaction_status ||
!mhi_res->buf_addr || !mhi_res->bytes_xferd) {
rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
ctl_dev->dev.stats.rx_err++;
} else {
ctl_dev->dev.stats.rx_pkts++;
rmnet_ctl_endpoint_post(mhi_res->buf_addr,
mhi_res->bytes_xferd);
}
/* Re-supply receive buffers */
rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
}
static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
if (skb) {
if (mhi_res->transaction_status) {
rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
skb->data, skb->len);
ctl_dev->dev.stats.tx_err++;
} else {
rmnet_ctl_log_debug("TXC", skb->data, skb->len);
ctl_dev->dev.stats.tx_complete++;
}
kfree_skb(skb);
}
}
static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
enum MHI_CB mhi_cb)
{
struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
if (mhi_cb != MHI_CB_FATAL_ERROR)
return;
atomic_inc(&ctl_dev->in_reset);
}
static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct rmnet_ctl_mhi_dev *ctl_dev;
struct device_node *of_node = mhi_dev->dev.of_node;
int rc;
ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
if (!ctl_dev)
return -ENOMEM;
ctl_dev->mhi_dev = mhi_dev;
ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
spin_lock_init(&ctl_dev->rx_lock);
spin_lock_init(&ctl_dev->tx_lock);
atomic_set(&ctl_dev->in_reset, 0);
dev_set_drvdata(&mhi_dev->dev, ctl_dev);
rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
if (rc || !ctl_dev->mru)
ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
rc = mhi_prepare_for_transfer(mhi_dev);
if (rc) {
pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
return -EINVAL;
}
/* Post receive buffers */
rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
pr_info("rmnet_ctl driver probed\n");
return 0;
}
static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
{
rmnet_ctl_endpoint_setdev(NULL);
synchronize_rcu();
dev_set_drvdata(&mhi_dev->dev, NULL);
pr_info("rmnet_ctl driver removed\n");
}
static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
{ .chan = "RMNET_CTL" },
{}
};
static struct mhi_driver rmnet_ctl_driver = {
.probe = rmnet_ctl_probe,
.remove = rmnet_ctl_remove,
.dl_xfer_cb = rmnet_ctl_dl_callback,
.ul_xfer_cb = rmnet_ctl_ul_callback,
.status_cb = rmnet_ctl_status_callback,
.id_table = rmnet_ctl_mhi_match,
.driver = {
.name = "rmnet_ctl",
.owner = THIS_MODULE,
},
};
module_driver(rmnet_ctl_driver,
mhi_driver_register, mhi_driver_unregister);
MODULE_DESCRIPTION("RmNet Control Driver");
MODULE_LICENSE("GPL v2");


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,8 +42,9 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct rmnet_frag_descriptor *frag_desc;
unsigned long flags;
spin_lock(&port->desc_pool_lock);
spin_lock_irqsave(&port->desc_pool_lock, flags);
if (!list_empty(&pool->free_list)) {
frag_desc = list_first_entry(&pool->free_list,
struct rmnet_frag_descriptor,
@@ -60,7 +61,7 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
}
out:
spin_unlock(&port->desc_pool_lock);
spin_unlock_irqrestore(&port->desc_pool_lock, flags);
return frag_desc;
}
EXPORT_SYMBOL(rmnet_get_frag_descriptor);
@@ -69,16 +70,19 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct page *page = skb_frag_page(&frag_desc->frag);
unsigned long flags;
list_del(&frag_desc->list);
put_page(skb_frag_page(&frag_desc->frag));
if (page)
put_page(page);
memset(frag_desc, 0, sizeof(*frag_desc));
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
spin_lock(&port->desc_pool_lock);
spin_lock_irqsave(&port->desc_pool_lock, flags);
list_add_tail(&frag_desc->list, &pool->free_list);
spin_unlock(&port->desc_pool_lock);
spin_unlock_irqrestore(&port->desc_pool_lock, flags);
}
EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
@@ -381,12 +385,28 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
struct rmnet_frag_descriptor *frag_desc)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (frag_desc->trans_proto == IPPROTO_TCP)
shinfo->gso_type = (frag_desc->ip_proto == 4) ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
else
shinfo->gso_type = SKB_GSO_UDP_L4;
shinfo->gso_size = frag_desc->gso_size;
shinfo->gso_segs = frag_desc->gso_segs;
}
/* Set the partial checksum information. Sets the transport checksum to the
* pseudoheader checksum and sets the offload metadata.
*/
static void rmnet_frag_partial_csum(struct sk_buff *skb,
struct rmnet_frag_descriptor *frag_desc)
{
struct iphdr *iph = (struct iphdr *)skb->data;
__sum16 pseudo;
u16 pkt_len = skb->len - frag_desc->ip_len;
bool ipv4 = frag_desc->ip_proto == 4;
if (ipv4) {
if (frag_desc->ip_proto == 4) {
iph->tot_len = htons(skb->len);
iph->check = 0;
iph->check = ip_fast_csum(iph, iph->ihl);
@@ -407,7 +427,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
((u8 *)iph + frag_desc->ip_len);
tp->check = pseudo;
shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
struct udphdr *up = (struct udphdr *)
@@ -415,14 +434,11 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
up->len = htons(pkt_len);
up->check = pseudo;
shinfo->gso_type = SKB_GSO_UDP_L4;
skb->csum_offset = offsetof(struct udphdr, check);
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
shinfo->gso_size = frag_desc->gso_size;
shinfo->gso_segs = frag_desc->gso_segs;
}
/* Allocate and populate an skb to contain the packet represented by the
@@ -450,15 +466,19 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
if (frag_desc->trans_len)
skb_set_transport_header(head_skb, frag_desc->ip_len);
/* Packets that have no data portion don't need any frags */
if (hdr_len == skb_frag_size(&frag_desc->frag))
goto skip_frags;
/* If the headers we added are the start of the page,
* we don't want to add them twice
*/
if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc))
rmnet_frag_pull(frag_desc, port, hdr_len);
if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
/* "Header only" packets can be fast-forwarded */
if (hdr_len == skb_frag_size(&frag_desc->frag))
goto skip_frags;
if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
kfree_skb(head_skb);
return NULL;
}
}
} else {
/* Allocate enough space to avoid penalties in the stack
* from __pskb_pull_tail()
@@ -542,8 +562,64 @@ skip_frags:
}
/* Handle csum offloading */
if (frag_desc->csum_valid)
if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
/* Set the partial checksum information */
rmnet_frag_partial_csum(head_skb, frag_desc);
} else if (frag_desc->csum_valid) {
/* Non-RSB/RSC/perf packet. The current checksum is fine */
head_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (frag_desc->hdrs_valid &&
(frag_desc->trans_proto == IPPROTO_TCP ||
frag_desc->trans_proto == IPPROTO_UDP)) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
* reliable way to do it is to calculate the actual checksum
* and corrupt it.
*/
__sum16 *check;
__wsum csum;
unsigned int offset = skb_transport_offset(head_skb);
__sum16 pseudo;
/* Calculate pseudo header and update header fields */
if (frag_desc->ip_proto == 4) {
struct iphdr *iph = ip_hdr(head_skb);
__be16 tot_len = htons(head_skb->len);
csum_replace2(&iph->check, iph->tot_len, tot_len);
iph->tot_len = tot_len;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(head_skb);
ip6h->payload_len = htons(head_skb->len -
sizeof(*ip6h));
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
}
if (frag_desc->trans_proto == IPPROTO_TCP) {
check = &tcp_hdr(head_skb)->check;
} else {
udp_hdr(head_skb)->len = htons(head_skb->len -
frag_desc->ip_len);
check = &udp_hdr(head_skb)->check;
}
*check = pseudo;
csum = skb_checksum(head_skb, offset, head_skb->len - offset,
0);
/* Add 1 to corrupt. This cannot produce a final value of 0
* since csum_fold() can't return a value of 0xFFFF
*/
*check = csum16_add(csum_fold(csum), htons(1));
head_skb->ip_summed = CHECKSUM_NONE;
}
/* Handle any rmnet_perf metadata */
if (frag_desc->hash) {
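An aside on the CHECKSUM_NONE branch above: the hardware reports only that the checksum was bad, not the original value, so the driver computes the correct checksum and then adds one, which guarantees the stack's own verification fails. The core of the trick as a standalone sketch (helper name hypothetical):

/* Hypothetical helper: make an skb fail stack-side checksum checks.
 * 'check' points at the transport checksum field, 'offset' is the
 * transport header offset, and 'pseudo' is the inverted pseudo-header
 * sum (~csum_tcpudp_magic()/~csum_ipv6_magic()).
 */
static void example_poison_csum(struct sk_buff *skb, __sum16 *check,
				unsigned int offset, __sum16 pseudo)
{
	__wsum csum;

	*check = pseudo;
	csum = skb_checksum(skb, offset, skb->len - offset, 0);
	/* csum_fold() never returns 0xFFFF, so adding 1 cannot wrap
	 * around to a valid-looking 0.
	 */
	*check = csum16_add(csum_fold(csum), htons(1));
	skb->ip_summed = CHECKSUM_NONE;
}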
@@ -576,8 +652,8 @@ EXPORT_SYMBOL(rmnet_frag_deliver);
static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
struct rmnet_port *port,
struct list_head *list,
u8 pkt_id)
struct list_head *list, u8 pkt_id,
bool csum_valid)
{
struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
struct rmnet_frag_descriptor *new_frag;
@@ -606,6 +682,12 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
new_frag->tcp_seq_set = 1;
new_frag->tcp_seq = htonl(ntohl(th->seq) +
coal_desc->data_offset);
} else if (coal_desc->trans_proto == IPPROTO_UDP) {
struct udphdr *uh;
uh = (struct udphdr *)(hdr_start + coal_desc->ip_len);
if (coal_desc->ip_proto == 4 && !uh->check)
csum_valid = true;
}
if (coal_desc->ip_proto == 4) {
@@ -617,7 +699,7 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
}
new_frag->hdr_ptr = hdr_start;
new_frag->csum_valid = true;
new_frag->csum_valid = csum_valid;
priv->stats.coal.coal_reconstruct++;
/* Update meta information to move past the data we just segmented */
@@ -628,6 +710,33 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
list_add_tail(&new_frag->list, list);
}
static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
{
u8 *data = rmnet_frag_data_ptr(frag_desc);
unsigned int datagram_len;
__wsum csum;
__sum16 pseudo;
datagram_len = skb_frag_size(&frag_desc->frag) - frag_desc->ip_len;
if (frag_desc->ip_proto == 4) {
struct iphdr *iph = (struct iphdr *)data;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
datagram_len,
frag_desc->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
datagram_len, frag_desc->trans_proto,
0);
}
csum = csum_partial(data + frag_desc->ip_len, datagram_len,
csum_unfold(pseudo));
return !csum_fold(csum);
}
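The check above is the standard receiver-side verification from RFC 1071: seeding the sum with the inverted pseudo-header sum and folding yields zero exactly when the embedded transport checksum is correct. For a linear buffer the same identity reduces to a one-liner (a sketch, not part of this patch):

/* Sketch: ones'-complement verification over a linear transport
 * segment. 'pseudo' is the inverted pseudo-header sum, as above.
 */
static bool example_csum_ok(const void *l4, unsigned int len, __sum16 pseudo)
{
	return !csum_fold(csum_partial(l4, len, csum_unfold(pseudo)));
}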
/* Converts the coalesced frame into a list of descriptors.
* NLOs containing csum errors will not be included.
*/
@@ -643,12 +752,16 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
u8 pkt, total_pkt = 0;
u8 nlo;
bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
bool zero_csum = false;
/* Pull off the headers we no longer need */
rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header));
if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)))
return;
coal_hdr = (struct rmnet_map_v5_coal_header *)
rmnet_frag_data_ptr(coal_desc);
rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr));
if (!rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr)))
return;
iph = (struct iphdr *)rmnet_frag_data_ptr(coal_desc);
@@ -662,11 +775,12 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
gro = false;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
int ip_len;
__be16 frag_off;
u8 protocol = ip6h->nexthdr;
coal_desc->ip_proto = 6;
coal_desc->ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
sizeof(*ip6h),
&protocol,
&frag_off);
@@ -676,10 +790,13 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
* (which should technically not be possible, if the HW
* works as intended...), bail.
*/
if (coal_desc->ip_len < 0 || frag_off) {
if (ip_len < 0 || frag_off) {
priv->stats.coal.coal_ip_invalid++;
return;
} else if (coal_desc->ip_len > sizeof(*ip6h)) {
}
coal_desc->ip_len = (u16)ip_len;
if (coal_desc->ip_len > sizeof(*ip6h)) {
/* Don't allow coalescing of any packets with IPv6
* extension headers.
*/
@@ -696,44 +813,87 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len);
coal_desc->trans_len = th->doff * 4;
} else if (coal_desc->trans_proto == IPPROTO_UDP) {
coal_desc->trans_len = sizeof(struct udphdr);
struct udphdr *uh;
uh = (struct udphdr *)((u8 *)iph + coal_desc->ip_len);
coal_desc->trans_len = sizeof(*uh);
if (coal_desc->ip_proto == 4 && !uh->check)
zero_csum = true;
} else {
priv->stats.coal.coal_trans_invalid++;
return;
}
coal_desc->hdrs_valid = 1;
if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
/* Mark the checksum as valid if it checks out */
if (rmnet_frag_validate_csum(coal_desc))
coal_desc->csum_valid = true;
coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
list_add_tail(&coal_desc->list, list);
return;
}
/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
* no checksum errors, and are allowing GRO. We can just reuse this
* descriptor unchanged.
*/
if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
coal_desc->csum_valid = true;
coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
list_add_tail(&coal_desc->list, list);
return;
}
/* Segment the coalesced descriptor into new packets */
for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
coal_desc->gso_size = pkt_len;
for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
pkt++, total_pkt++) {
nlo_err_mask <<= 1;
if (nlo_err_mask & (1ULL << 63)) {
pkt++, total_pkt++, nlo_err_mask >>= 1) {
bool csum_err = nlo_err_mask & 1;
/* Segment the packet if we're not sending the larger
* packet up the stack.
*/
if (!gro) {
coal_desc->gso_segs = 1;
if (csum_err)
priv->stats.coal.coal_csum_err++;
__rmnet_frag_segment_data(coal_desc, port,
list, total_pkt,
!csum_err);
continue;
}
if (csum_err) {
priv->stats.coal.coal_csum_err++;
/* Segment out the good data */
if (gro && coal_desc->gso_segs)
if (coal_desc->gso_segs)
__rmnet_frag_segment_data(coal_desc,
port,
list,
total_pkt);
total_pkt,
true);
/* skip over bad packet */
coal_desc->data_offset += pkt_len;
coal_desc->pkt_id = total_pkt + 1;
/* Segment out the bad checksum */
coal_desc->gso_segs = 1;
__rmnet_frag_segment_data(coal_desc, port,
list, total_pkt,
false);
} else {
coal_desc->gso_segs++;
/* Segment the packet if we aren't sending the
* larger packet up the stack.
*/
if (!gro)
__rmnet_frag_segment_data(coal_desc,
port,
list,
total_pkt);
}
}
@@ -741,25 +901,9 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
* the previous one, if we haven't done so. NLOs only switch
* when the packet length changes.
*/
if (gro && coal_desc->gso_segs) {
/* Fast forward the (hopefully) common case.
* Frames with only one NLO (i.e. one packet length) and
* no checksum errors don't need to be segmented here.
* We can just pass off the original skb.
*/
if (coal_desc->gso_size * coal_desc->gso_segs ==
skb_frag_size(&coal_desc->frag) -
coal_desc->ip_len - coal_desc->trans_len) {
coal_desc->hdr_ptr =
rmnet_frag_data_ptr(coal_desc);
coal_desc->csum_valid = true;
list_add_tail(&coal_desc->list, list);
return;
}
if (coal_desc->gso_segs)
__rmnet_frag_segment_data(coal_desc, port, list,
total_pkt);
}
total_pkt, true);
}
}
@@ -840,7 +984,7 @@ rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
u8 pkt = coal_hdr->nl_pairs[i].num_packets;
mask |= ((u64)err) << (7 - i) * 8;
mask |= ((u64)err) << (8 * i);
/* Track total packets in frame */
pkts += pkt;
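Note the packing fix here: the old shift (`(7 - i) * 8`) filled the mask from the most-significant byte down, while the segmentation loop above now consumes it LSB-first with `nlo_err_mask & 1` and `>>= 1`. After this change, NLO i's 8-bit error bitmap occupies byte i of the mask, so bit 0 is the first packet of the first NLO. An illustrative sketch of the construction ('bitmaps' stands in for the coal header's csum_error_bitmap fields):

/* Illustrative: build the error mask as the patched code does */
static u64 example_build_err_mask(const u8 *bitmaps, unsigned int num_nlos)
{
	u64 mask = 0;
	unsigned int i;

	for (i = 0; i < num_nlos; i++)
		mask |= (u64)bitmaps[i] << (8 * i);	/* NLO i -> byte i */

	return mask;
}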
@@ -897,15 +1041,23 @@ int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
priv->stats.csum_valid_unset++;
}
rmnet_frag_pull(frag_desc, port,
if (!rmnet_frag_pull(frag_desc, port,
sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header));
sizeof(struct rmnet_map_v5_csum_header))) {
rc = -EINVAL;
break;
}
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
rmnet_frag_trim(frag_desc, port, len);
if (!rmnet_frag_trim(frag_desc, port, len)) {
rc = -EINVAL;
break;
}
list_del_init(&frag_desc->list);
list_add_tail(&frag_desc->list, list);
break;
@@ -969,10 +1121,14 @@ __rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
goto recycle;
} else {
/* We only have the main QMAP header to worry about */
rmnet_frag_pull(frag_desc, port, sizeof(*qmap));
if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
return;
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
rmnet_frag_trim(frag_desc, port, len);
if (!rmnet_frag_trim(frag_desc, port, len))
return;
list_add_tail(&frag_desc->list, &segs);
}


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,8 @@ static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
unsigned int size)
{
if (size >= skb_frag_size(&frag_desc->frag)) {
pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
__func__, size, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
@@ -107,6 +109,8 @@ static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
unsigned int size)
{
if (!size) {
pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
__func__, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}

core/rmnet_genl.c (new file)

@@ -0,0 +1,412 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* RMNET Data Generic Netlink
*
*/
#include "rmnet_genl.h"
#include <net/sock.h>
#include <linux/skbuff.h>
/* Static Functions and Definitions */
static struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
1] = {
[RMNET_CORE_GENL_ATTR_INT] = { .type = NLA_S32 },
[RMNET_CORE_GENL_ATTR_PID_BPS] = { .len =
sizeof(struct rmnet_core_pid_bps_resp) },
[RMNET_CORE_GENL_ATTR_PID_BOOST] = { .len =
sizeof(struct rmnet_core_pid_boost_req) },
[RMNET_CORE_GENL_ATTR_STR] = { .type = NLA_NUL_STRING },
};
#define RMNET_CORE_GENL_OP(_cmd, _func) \
{ \
.cmd = _cmd, \
.doit = _func, \
.dumpit = NULL, \
.flags = 0, \
}
static const struct genl_ops rmnet_core_genl_ops[] = {
RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
rmnet_core_genl_pid_bps_req_hdlr),
RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
rmnet_core_genl_pid_boost_req_hdlr),
};
struct genl_family rmnet_core_genl_family = {
.hdrsize = 0,
.name = RMNET_CORE_GENL_FAMILY_NAME,
.version = RMNET_CORE_GENL_VERSION,
.maxattr = RMNET_CORE_GENL_ATTR_MAX,
.policy = rmnet_genl_attr_policy,
.ops = rmnet_core_genl_ops,
.n_ops = ARRAY_SIZE(rmnet_core_genl_ops),
};
#define RMNET_PID_STATS_HT_SIZE (8)
#define RMNET_PID_STATS_HT rmnet_pid_ht
DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);
/* Spinlock definition for pid hash table */
static DEFINE_SPINLOCK(rmnet_pid_ht_splock);
#define RMNET_GENL_SEC_TO_MSEC(x) ((x) * 1000)
#define RMNET_GENL_SEC_TO_NSEC(x) ((x) * 1000000000)
#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
#define RMNET_GENL_NSEC_TO_SEC(x) ({\
u64 __quotient = (x); \
do_div(__quotient, 1000000000); \
__quotient; \
})
int rmnet_core_userspace_connected;
#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */
struct rmnet_pid_node_s {
struct hlist_node list;
time_t timstamp_last_query;
u64 tx_bytes;
u64 tx_bytes_last_query;
u64 tx_bps;
u64 sched_boost_period_ms;
int sched_boost_remaining_ms;
int sched_boost_enable;
pid_t pid;
};
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
int *boost_enable, u64 *boost_period)
{
struct hlist_node *tmp;
struct rmnet_pid_node_s *node_p;
unsigned long ht_flags;
u8 is_match_found = 0;
u64 tx_bytes = 0;
*boost_enable = 0;
*boost_period = 0;
/* Using do while to spin lock and unlock only once */
spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
do {
hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
list, pid) {
if (pid != node_p->pid)
continue;
/* PID Match found */
is_match_found = 1;
node_p->tx_bytes += len;
tx_bytes = node_p->tx_bytes;
if (node_p->sched_boost_enable) {
rm_err("boost triggered for pid %d",
pid);
/* Just triggered boost, don't re-trigger */
node_p->sched_boost_enable = 0;
*boost_enable = 1;
*boost_period = node_p->sched_boost_period_ms;
node_p->sched_boost_remaining_ms =
(int)*boost_period;
}
break;
}
if (is_match_found)
break;
/* No PID match */
node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
if (!node_p)
break;
node_p->pid = pid;
node_p->tx_bytes = len;
node_p->sched_boost_enable = 0;
node_p->sched_boost_period_ms = 0;
node_p->sched_boost_remaining_ms = 0;
hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
break;
} while (0);
spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
void rmnet_boost_for_pid(pid_t pid, int boost_enable,
u64 boost_period)
{
struct hlist_node *tmp;
struct rmnet_pid_node_s *node_p;
unsigned long ht_flags;
/* Using do while to spin lock and unlock only once */
spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
do {
hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
list, pid) {
if (pid != node_p->pid)
continue;
/* PID Match found */
rm_err("CORE_BOOST: enable boost for pid %d for %d ms",
pid, boost_period);
node_p->sched_boost_enable = boost_enable;
node_p->sched_boost_period_ms = boost_period;
break;
}
break;
} while (0);
spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
}
static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
*pid_bps_resp_ptr)
{
struct timespec time;
struct hlist_node *tmp;
struct rmnet_pid_node_s *node_p;
unsigned long ht_flags;
u64 tx_bytes_cur, byte_diff, time_diff_ns, tmp_bits;
int i;
u16 bkt;
(void)getnstimeofday(&time);
pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
time.tv_nsec;
/* Using do while to spin lock and unlock only once */
spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
do {
i = 0;
hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
node_p, list) {
tx_bytes_cur = node_p->tx_bytes;
if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
/* Don't send inactive pids to userspace */
/* TODO: can remove from hash table probably */
node_p->tx_bps = 0;
node_p->timstamp_last_query =
pid_bps_resp_ptr->timestamp;
node_p->sched_boost_remaining_ms = 0;
continue;
}
/* Compute bits per second */
byte_diff = (node_p->tx_bytes -
node_p->tx_bytes_last_query);
time_diff_ns = (pid_bps_resp_ptr->timestamp -
node_p->timstamp_last_query);
tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
/* Note that do_div() returns the remainder and assigns the
 * quotient to the numerator. Since do_div() modifies the
 * numerator in place, a temporary (tmp_bits) is used.
 */
do_div(tmp_bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));
node_p->tx_bps = tmp_bits;
if (node_p->sched_boost_remaining_ms >=
RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
node_p->sched_boost_remaining_ms -=
RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
rm_err("CORE_BOOST: enabling boost for pid %d\n"
"sched boost remaining = %d ms",
node_p->pid,
node_p->sched_boost_remaining_ms);
} else {
node_p->sched_boost_remaining_ms = 0;
}
pid_bps_resp_ptr->list[i].pid = node_p->pid;
pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
pid_bps_resp_ptr->list[i].boost_remaining_ms =
node_p->sched_boost_remaining_ms;
node_p->timstamp_last_query =
pid_bps_resp_ptr->timestamp;
node_p->tx_bytes_last_query = tx_bytes_cur;
i++;
/* Support copying up to 32 active pids */
if (i >= RMNET_CORE_GENL_MAX_PIDS)
break;
}
break;
} while (0);
spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
pid_bps_resp_ptr->list_len = i;
}
int rmnet_core_genl_send_resp(struct genl_info *info,
struct rmnet_core_pid_bps_resp *pid_bps_resp)
{
struct sk_buff *skb;
void *msg_head;
int rc;
if (!info || !pid_bps_resp) {
rm_err("%s", "SHS_GNL: Invalid params\n");
goto out;
}
skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
if (!skb)
goto out;
msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
&rmnet_core_genl_family,
0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
if (!msg_head) {
rc = -ENOMEM;
goto out;
}
rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
sizeof(struct rmnet_core_pid_bps_resp),
pid_bps_resp);
if (rc != 0)
goto out;
genlmsg_end(skb, msg_head);
rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
if (rc != 0)
goto out;
rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
return RMNET_GENL_SUCCESS;
out:
/* TODO: Need to free skb?? */
rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
rmnet_core_userspace_connected = 0;
return RMNET_GENL_FAILURE;
}
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info)
{
struct nlattr *na;
struct rmnet_core_pid_bps_req pid_bps_req;
struct rmnet_core_pid_bps_resp pid_bps_resp;
int is_req_valid = 0;
rm_err("CORE_GNL: %s connected = %d", __func__,
rmnet_core_userspace_connected);
if (!info) {
rm_err("%s", "CORE_GNL: error - info is null");
pid_bps_resp.valid = 0;
} else {
na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
if (na) {
if (nla_memcpy(&pid_bps_req, na,
sizeof(pid_bps_req)) > 0) {
is_req_valid = 1;
} else {
rm_err("CORE_GNL: nla_memcpy failed %d\n",
RMNET_CORE_GENL_ATTR_PID_BPS);
}
} else {
rm_err("CORE_GNL: no info->attrs %d\n",
RMNET_CORE_GENL_ATTR_PID_BPS);
}
}
if (!rmnet_core_userspace_connected)
rmnet_core_userspace_connected = 1;
/* Copy the pid/byte list to the payload */
if (is_req_valid) {
memset(&pid_bps_resp, 0x0,
sizeof(pid_bps_resp));
rmnet_create_pid_bps_resp(&pid_bps_resp);
}
pid_bps_resp.valid = 1;
rmnet_core_genl_send_resp(info, &pid_bps_resp);
return RMNET_GENL_SUCCESS;
}
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info)
{
struct nlattr *na;
struct rmnet_core_pid_boost_req pid_boost_req;
int is_req_valid = 0;
u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
u16 i = 0;
rm_err("%s", "CORE_GNL: %s", __func__);
if (!info) {
rm_err("%s", "CORE_GNL: error - info is null");
return RMNET_GENL_FAILURE;
}
na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
if (na) {
if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) > 0) {
is_req_valid = 1;
} else {
rm_err("CORE_GNL: nla_memcpy failed %d\n",
RMNET_CORE_GENL_ATTR_PID_BOOST);
return RMNET_GENL_FAILURE;
}
} else {
rm_err("CORE_GNL: no info->attrs %d\n",
RMNET_CORE_GENL_ATTR_PID_BOOST);
return RMNET_GENL_FAILURE;
}
if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
boost_pid_cnt = pid_boost_req.list_len;
if (!pid_boost_req.valid)
boost_pid_cnt = 0;
for (i = 0; i < boost_pid_cnt; i++) {
if (pid_boost_req.list[i].boost_enabled) {
rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
pid_boost_req.list[i].boost_period);
}
}
return RMNET_GENL_SUCCESS;
}
/* register new rmnet core driver generic netlink family */
int rmnet_core_genl_init(void)
{
int ret;
ret = genl_register_family(&rmnet_core_genl_family);
if (ret != 0) {
rm_err("CORE_GNL: register family failed: %i", ret);
genl_unregister_family(&rmnet_core_genl_family);
return RMNET_GENL_FAILURE;
}
rm_err("CORE_GNL: successfully registered generic netlink family: %s",
RMNET_CORE_GENL_FAMILY_NAME);
return RMNET_GENL_SUCCESS;
}
/* Unregister the generic netlink family */
int rmnet_core_genl_deinit(void)
{
int ret;
ret = genl_unregister_family(&rmnet_core_genl_family);
if (ret != 0)
rm_err("CORE_GNL: unregister family failed: %i\n", ret);
return RMNET_GENL_SUCCESS;
}

core/rmnet_genl.h (new file)

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*
* RMNET Data Generic Netlink
*
*/
#ifndef _RMNET_GENL_H_
#define _RMNET_GENL_H_
#include <net/genetlink.h>
#define RMNET_CORE_DEBUG 0
#define rm_err(fmt, ...) \
do { if (RMNET_CORE_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
/* Generic Netlink Definitions */
#define RMNET_CORE_GENL_VERSION 1
#define RMNET_CORE_GENL_FAMILY_NAME "RMNET_CORE"
#define RMNET_CORE_GENL_MAX_PIDS 32
#define RMNET_GENL_SUCCESS (0)
#define RMNET_GENL_FAILURE (-1)
extern int rmnet_core_userspace_connected;
enum {
RMNET_CORE_GENL_CMD_UNSPEC,
RMNET_CORE_GENL_CMD_PID_BPS_REQ,
RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
__RMNET_CORE_GENL_CMD_MAX,
};
enum {
RMNET_CORE_GENL_ATTR_UNSPEC,
RMNET_CORE_GENL_ATTR_STR,
RMNET_CORE_GENL_ATTR_INT,
RMNET_CORE_GENL_ATTR_PID_BPS,
RMNET_CORE_GENL_ATTR_PID_BOOST,
__RMNET_CORE_GENL_ATTR_MAX,
};
#define RMNET_CORE_GENL_ATTR_MAX (__RMNET_CORE_GENL_ATTR_MAX - 1)
struct rmnet_core_pid_bps_info {
u64 tx_bps;
u32 pid;
u32 boost_remaining_ms;
};
struct rmnet_core_pid_boost_info {
u32 boost_enabled;
/* Boost period in ms */
u32 boost_period;
u32 pid;
};
struct rmnet_core_pid_bps_req {
struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
u64 timestamp;
u16 list_len;
u8 valid;
};
struct rmnet_core_pid_bps_resp {
struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
u64 timestamp;
u16 list_len;
u8 valid;
};
struct rmnet_core_pid_boost_req {
struct rmnet_core_pid_boost_info list[RMNET_CORE_GENL_MAX_PIDS];
u64 timestamp;
u16 list_len;
u8 valid;
};
/* Function Prototypes */
int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info);
int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
struct genl_info *info);
/* Called by vnd select queue */
void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
int *boost_enable, u64 *boost_period);
int rmnet_core_genl_init(void);
int rmnet_core_genl_deinit(void);
#endif /* _RMNET_GENL_H_ */
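For reference, a hedged userspace sketch (not part of this patch) of driving the PID/bps query with libnl-3; the command, attribute, and struct names come from this header, and the kernel replies on the same attribute with a struct rmnet_core_pid_bps_resp:

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

int example_query_pid_bps(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct rmnet_core_pid_bps_req req = { .valid = 1 };
	struct nl_msg *msg;
	int fam;

	genl_connect(sk);
	/* The kernel answers with snd_seq + 1, so relax seq checking */
	nl_socket_disable_seq_check(sk);
	fam = genl_ctrl_resolve(sk, RMNET_CORE_GENL_FAMILY_NAME);

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
		    RMNET_CORE_GENL_CMD_PID_BPS_REQ, RMNET_CORE_GENL_VERSION);
	nla_put(msg, RMNET_CORE_GENL_ATTR_PID_BPS, sizeof(req), &req);

	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	/* Parse the RMNET_CORE_GENL_ATTR_PID_BPS attribute of the reply
	 * (a struct rmnet_core_pid_bps_resp) in a callback registered
	 * with nl_socket_modify_cb() before calling this.
	 */
	nl_recvmsgs_default(sk);
	nl_socket_free(sk);
	return 0;
}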


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -69,6 +69,21 @@ void rmnet_set_skb_proto(struct sk_buff *skb)
}
EXPORT_SYMBOL(rmnet_set_skb_proto);
bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_slow_start_detect);
bool rmnet_slow_start_on(u32 hash_key)
{
bool (*rmnet_shs_slow_start_on)(u32 hash_key);
rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
if (rmnet_shs_slow_start_on)
return rmnet_shs_slow_start_on(hash_key);
return false;
}
EXPORT_SYMBOL(rmnet_slow_start_on);
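The __rcu function pointer plus the EXPORT_SYMBOL pair is the module-hook pattern used throughout this driver: an external module (rmnet_shs here) publishes its detector with rcu_assign_pointer() and the data path samples it under RCU. A hedged sketch of the external side:

/* Illustrative only: how a module would install the detector */
static bool example_slow_start_detect(u32 hash_key)
{
	return false;	/* no tracked flow is in slow start */
}

static int example_init(void)
{
	rcu_assign_pointer(rmnet_shs_slow_start_detect,
			   example_slow_start_detect);
	return 0;
}

static void example_exit(void)
{
	RCU_INIT_POINTER(rmnet_shs_slow_start_detect, NULL);
	synchronize_rcu();	/* wait out in-flight readers */
}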
/* Shs hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
struct rmnet_port *port) __rcu __read_mostly;
@@ -245,7 +260,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
if (skb->dev->type == ARPHRD_ETHER) {
if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
@@ -311,7 +326,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
} else if ((port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) ||
(port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
@@ -323,8 +339,12 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
return -ENOMEM;
}
if (port->data_format & RMNET_INGRESS_FORMAT_PS)
qmi_rmnet_work_maybe_restart(port);
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
port);
@@ -334,19 +354,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
int non_linear_skb;
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
skb_is_nonlinear(skb);
if (non_linear_skb) {
if (unlikely(__skb_linearize(skb)))
goto done;
}
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
@@ -430,9 +440,9 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM)
if (err == -ENOMEM) {
goto drop;
else if (err == -EINPROGRESS) {
} else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);
return;
}


@@ -1,4 +1,4 @@
/* Copyright (c) 2013, 2016-2017, 2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013, 2016-2017, 2019-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@ void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
enum rmnet_packet_context ctx);
void rmnet_set_skb_proto(struct sk_buff *skb);
bool rmnet_slow_start_on(u32 hash_key);
rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,7 +80,9 @@ struct rmnet_map_header {
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 hw_reserved:5;
u8 priority:1;
u8 hw_reserved_bit:1;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);
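The new priority bit is carved out of the old 7-bit reserved field, so the header size is unchanged. On the little-endian targets this driver runs on, the resulting wire layout works out to the sketch below (bitfield order is compiler- and endianness-dependent):

/* Byte 0, LSB first: next_hdr:1 | header_type:7
 * Byte 1, LSB first: hw_reserved:5 | priority:1 | hw_reserved_bit:1 |
 *                    csum_valid_required:1
 * Bytes 2-3: reserved
 */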
@@ -122,7 +124,7 @@ struct rmnet_map_dl_csum_trailer {
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ip4_ind:1;
u16 udp_ind:1;
u16 csum_enabled:1;
} __aligned(1);
@@ -179,16 +181,18 @@ struct rmnet_map_dl_ind_trl {
struct rmnet_map_dl_ind {
u8 priority;
union {
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *,
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
struct
rmnet_map_control_command_header *);
rmnet_map_control_command_header
* qcmd);
} __aligned(1);
union {
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *,
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
struct
rmnet_map_control_command_header *);
rmnet_map_control_command_header
* qcmd);
} __aligned(1);
struct list_head list;
};
@@ -258,8 +262,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev,
int csum_type);
bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len);
@@ -267,6 +273,8 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
u8 count, u8 features, u32 time);
void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dl_hdr);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,


@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -230,9 +230,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
ul_header->udp_ip4_ind = 1;
ul_header->udp_ind = 1;
else
ul_header->udp_ip4_ind = 0;
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -263,6 +263,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
@@ -270,7 +271,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
ul_header->udp_ip4_ind = 0;
if (ip6h->nexthdr == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -479,12 +484,25 @@ sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ip4_ind = 0;
ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
static void rmnet_map_v5_check_priority(struct sk_buff *skb,
struct net_device *orig_dev,
struct rmnet_map_v5_csum_header *hdr)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
if (skb->priority) {
priv->stats.ul_prio++;
hdr->priority = 1;
}
}
void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
@@ -495,6 +513,13 @@ void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
rmnet_map_v5_check_priority(skb, orig_dev, ul_header);
/* Allow priority w/o csum offload */
if (!(port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5))
return;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
void *iph = (char *)ul_header + sizeof(*ul_header);
void *trans;
@@ -535,6 +560,7 @@ sw_csum:
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev,
int csum_type)
{
@@ -543,13 +569,36 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
break;
case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
break;
default:
break;
}
}
bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
{
/* Only applies to frames with a single packet */
if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
return false;
/* TCP header has FIN or PUSH set */
if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
return true;
/* Hit packet limit, byte limit, or time limit/EOF on DMA */
if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
switch (coal_hdr->close_value) {
case RMNET_MAP_COAL_CLOSE_HW_PKT:
case RMNET_MAP_COAL_CLOSE_HW_BYTE:
case RMNET_MAP_COAL_CLOSE_HW_TIME:
return true;
}
}
return false;
}
static void rmnet_map_move_headers(struct sk_buff *skb)
{
struct iphdr *iph;
@@ -636,50 +685,67 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
struct rmnet_map_coal_metadata *coal_meta)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct iphdr *iph = ip_hdr(skb);
if (coal_meta->trans_proto == IPPROTO_TCP)
shinfo->gso_type = (coal_meta->ip_proto == 4) ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
else
shinfo->gso_type = SKB_GSO_UDP_L4;
shinfo->gso_size = coal_meta->data_len;
shinfo->gso_segs = coal_meta->pkt_count;
}
/* Handles setting up the partial checksum in the skb. Sets the transport
* checksum to the pseudoheader checksum and sets the csum offload metadata
*/
static void rmnet_map_partial_csum(struct sk_buff *skb,
struct rmnet_map_coal_metadata *coal_meta)
{
unsigned char *data = skb->data;
__sum16 pseudo;
u16 pkt_len = skb->len - coal_meta->ip_len;
bool ipv4 = coal_meta->ip_proto == 4;
if (ipv4) {
if (coal_meta->ip_proto == 4) {
struct iphdr *iph = (struct iphdr *)data;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
pkt_len, coal_meta->trans_proto,
0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
pkt_len, coal_meta->trans_proto, 0);
}
if (coal_meta->trans_proto == IPPROTO_TCP) {
struct tcphdr *tp = tcp_hdr(skb);
struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);
tp->check = pseudo;
shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
struct udphdr *up = udp_hdr(skb);
struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);
up->check = pseudo;
shinfo->gso_type = SKB_GSO_UDP_L4;
skb->csum_offset = offsetof(struct udphdr, check);
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
shinfo->gso_size = coal_meta->data_len;
shinfo->gso_segs = coal_meta->pkt_count;
}
static void
__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
struct rmnet_map_coal_metadata *coal_meta,
struct sk_buff_head *list, u8 pkt_id)
struct sk_buff_head *list, u8 pkt_id,
bool csum_valid)
{
struct sk_buff *skbn;
struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
__sum16 *check = NULL;
u32 alloc_len;
bool zero_csum = false;
/* We can avoid copying the data if the SKB we got from the lower-level
* drivers was nonlinear.
@@ -688,7 +754,7 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
alloc_len = coal_meta->ip_len + coal_meta->trans_len;
else
alloc_len = coal_meta->ip_len + coal_meta->trans_len +
coal_meta->data_len;
(coal_meta->data_len * coal_meta->pkt_count);
skbn = alloc_skb(alloc_len, GFP_ATOMIC);
if (!skbn)
@@ -705,8 +771,14 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
struct tcphdr *th = tcp_hdr(skbn);
th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
check = &th->check;
} else if (coal_meta->trans_proto == IPPROTO_UDP) {
udp_hdr(skbn)->len = htons(skbn->len);
struct udphdr *uh = udp_hdr(skbn);
uh->len = htons(skbn->len);
check = &uh->check;
if (coal_meta->ip_proto == 4 && !uh->check)
zero_csum = true;
}
/* Push IP header and update necessary fields */
@@ -726,7 +798,45 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
sizeof(struct ipv6hdr));
}
skbn->ip_summed = CHECKSUM_UNNECESSARY;
/* Handle checksum status */
if (likely(csum_valid) || zero_csum) {
/* Set the partial checksum information */
rmnet_map_partial_csum(skbn, coal_meta);
} else if (check) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
* reliable way to do it is to calculate the actual checksum
* and corrupt it.
*/
__wsum csum;
unsigned int offset = skb_transport_offset(skbn);
__sum16 pseudo;
/* Calculate pseudo header */
if (coal_meta->ip_proto == 4) {
struct iphdr *iph = ip_hdr(skbn);
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skbn->len -
coal_meta->ip_len,
coal_meta->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(skbn);
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
skbn->len - coal_meta->ip_len,
coal_meta->trans_proto, 0);
}
*check = pseudo;
csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
/* Add 1 to corrupt. This cannot produce a final value of 0
* since csum_fold() can't return a value of 0xFFFF.
*/
*check = csum16_add(csum_fold(csum), htons(1));
skbn->ip_summed = CHECKSUM_NONE;
}
skbn->dev = coal_skb->dev;
priv->stats.coal.coal_reconstruct++;
@@ -742,6 +852,34 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
coal_meta->pkt_count = 0;
}
static bool rmnet_map_validate_csum(struct sk_buff *skb,
struct rmnet_map_coal_metadata *meta)
{
u8 *data = rmnet_map_data_ptr(skb);
unsigned int datagram_len;
__wsum csum;
__sum16 pseudo;
datagram_len = skb->len - meta->ip_len;
if (meta->ip_proto == 4) {
struct iphdr *iph = (struct iphdr *)data;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
datagram_len,
meta->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
datagram_len, meta->trans_proto,
0);
}
csum = skb_checksum(skb, meta->ip_len, datagram_len,
csum_unfold(pseudo));
return !csum_fold(csum);
}
/* Converts the coalesced SKB into a list of SKBs.
* NLOs containing csum errors will not be included.
* The original coalesced SKB should be treated as invalid and
@@ -759,6 +897,7 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
u8 pkt, total_pkt = 0;
u8 nlo;
bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
bool zero_csum = false;
memset(&coal_meta, 0, sizeof(coal_meta));
@@ -820,43 +959,85 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
coal_meta.trans_len = sizeof(*uh);
coal_meta.trans_header = uh;
/* Check for v4 zero checksum */
if (coal_meta.ip_proto == 4 && !uh->check)
zero_csum = true;
} else {
priv->stats.coal.coal_trans_invalid++;
return;
}
if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
rmnet_map_move_headers(coal_skb);
/* Mark as valid if it checks out */
if (rmnet_map_validate_csum(coal_skb, &coal_meta))
coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
__skb_queue_tail(list, coal_skb);
return;
}
/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
* no checksum errors, and are allowing GRO. We can just reuse this
* SKB unchanged.
*/
if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
rmnet_map_move_headers(coal_skb);
coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
if (coal_meta.pkt_count > 1) {
rmnet_map_partial_csum(coal_skb, &coal_meta);
rmnet_map_gso_stamp(coal_skb, &coal_meta);
}
__skb_queue_tail(list, coal_skb);
return;
}
/* Segment the coalesced SKB into new packets */
for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
coal_meta.data_len = pkt_len;
for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
pkt++, total_pkt++) {
nlo_err_mask <<= 1;
if (nlo_err_mask & (1ULL << 63)) {
pkt++, total_pkt++, nlo_err_mask >>= 1) {
bool csum_err = nlo_err_mask & 1;
/* Segment the packet if we're not sending the larger
* packet up the stack.
*/
if (!gro) {
coal_meta.pkt_count = 1;
if (csum_err)
priv->stats.coal.coal_csum_err++;
__rmnet_map_segment_coal_skb(coal_skb,
&coal_meta, list,
total_pkt,
!csum_err);
continue;
}
if (csum_err) {
priv->stats.coal.coal_csum_err++;
/* Segment out the good data */
if (gro && coal_meta.pkt_count) {
if (gro && coal_meta.pkt_count)
__rmnet_map_segment_coal_skb(coal_skb,
&coal_meta,
list,
total_pkt);
}
total_pkt,
true);
/* skip over bad packet */
coal_meta.data_offset += pkt_len;
coal_meta.pkt_id = total_pkt + 1;
/* Segment out the bad checksum */
coal_meta.pkt_count = 1;
__rmnet_map_segment_coal_skb(coal_skb,
&coal_meta, list,
total_pkt, false);
} else {
coal_meta.pkt_count++;
/* Segment the packet if we aren't sending the
* larger packet up the stack.
*/
if (!gro)
__rmnet_map_segment_coal_skb(coal_skb,
&coal_meta,
list,
total_pkt);
}
}
@@ -864,27 +1045,9 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
* the previous one, if we haven't done so. NLOs only switch
* when the packet length changes.
*/
if (gro && coal_meta.pkt_count) {
/* Fast forward the (hopefully) common case.
* Frames with only one NLO (i.e. one packet length) and
* no checksum errors don't need to be segmented here.
* We can just pass off the original skb.
*/
if (pkt_len * coal_meta.pkt_count ==
coal_skb->len - coal_meta.ip_len -
coal_meta.trans_len) {
rmnet_map_move_headers(coal_skb);
coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (coal_meta.pkt_count > 1)
rmnet_map_gso_stamp(coal_skb,
&coal_meta);
__skb_queue_tail(list, coal_skb);
return;
}
if (coal_meta.pkt_count)
__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
total_pkt);
}
total_pkt, true);
}
}
@@ -964,7 +1127,7 @@ static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
u8 pkt = coal_hdr->nl_pairs[i].num_packets;
mask |= ((u64)err) << (7 - i) * 8;
mask |= ((u64)err) << (8 * i);
/* Track total packets in frame */
pkts += pkt;
@@ -1105,13 +1268,154 @@ enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
return HRTIMER_NORESTART;
}
static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
unsigned int linear = src->len - src->data_len, target = src->len;
unsigned char *src_buf;
struct sk_buff *skb;
src_buf = src->data;
skb_put_data(dst, src_buf, linear);
target -= linear;
skb = src;
while (target) {
unsigned int i = 0, non_linear = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
skb_put_data(dst, src_buf, non_linear);
target -= non_linear;
}
if (skb_shinfo(skb)->frag_list) {
skb = skb_shinfo(skb)->frag_list;
continue;
}
if (skb->next)
skb = skb->next;
}
}
static void rmnet_free_agg_pages(struct rmnet_port *port)
{
struct rmnet_agg_page *agg_page, *idx;
list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
list_del(&agg_page->list);
put_page(agg_page->page);
kfree(agg_page);
}
port->agg_head = NULL;
}
static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
{
struct rmnet_agg_page *agg_page;
struct page *page = NULL;
int i = 0;
if (!(port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE))
goto alloc;
do {
agg_page = port->agg_head;
if (unlikely(!agg_page))
break;
if (page_ref_count(agg_page->page) == 1) {
page = agg_page->page;
page_ref_inc(agg_page->page);
port->stats.agg.ul_agg_reuse++;
port->agg_head = list_next_entry(agg_page, list);
break;
}
port->agg_head = list_next_entry(agg_page, list);
i++;
} while (i <= 5);
alloc:
if (!page) {
page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
port->stats.agg.ul_agg_alloc++;
}
return page;
}
static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
{
struct rmnet_agg_page *agg_page;
struct page *page;
agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
if (!agg_page)
return NULL;
page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
if (!page) {
kfree(agg_page);
return NULL;
}
agg_page->page = page;
INIT_LIST_HEAD(&agg_page->list);
return agg_page;
}
static void rmnet_alloc_agg_pages(struct rmnet_port *port)
{
struct rmnet_agg_page *agg_page = NULL;
int i = 0;
for (i = 0; i < 512; i++) {
agg_page = __rmnet_alloc_agg_pages(port);
if (agg_page)
list_add_tail(&agg_page->list, &port->agg_list);
}
port->agg_head = list_first_entry_or_null(&port->agg_list,
struct rmnet_agg_page, list);
}
static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
{
struct sk_buff *skb;
unsigned int size;
struct page *page;
void *vaddr;
page = rmnet_get_agg_pages(port);
if (!page)
return NULL;
vaddr = page_address(page);
size = PAGE_SIZE << port->agg_size_order;
skb = build_skb(vaddr, size);
if (!skb) {
put_page(page);
return NULL;
}
return skb;
}
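The recycling scheme above hinges on reference counts: rmnet_get_agg_pages() hands a page to build_skb() with an extra reference, the stack drops its reference once the aggregate skb is consumed, and a count of 1 therefore means only the pool still holds the page. The test in isolation (an illustrative sketch):

/* Illustrative: a pool page is reusable iff only the pool holds it */
static struct page *example_try_recycle(struct rmnet_agg_page *agg_page)
{
	if (page_ref_count(agg_page->page) != 1)
		return NULL;	/* an skb still owns a reference */

	page_ref_inc(agg_page->page);	/* reference for the next skb */
	return agg_page->page;
}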
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec64 diff, last;
int size, agg_count = 0;
struct sk_buff *agg_skb;
unsigned long flags;
u8 *dest_buff;
new_packet:
spin_lock_irqsave(&port->agg_lock, flags);
@@ -1133,7 +1437,7 @@ new_packet:
return;
}
port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
port->agg_skb = rmnet_map_build_skb(port);
if (!port->agg_skb) {
port->agg_skb = 0;
port->agg_count = 0;
@@ -1143,6 +1447,9 @@ new_packet:
dev_queue_xmit(skb);
return;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_skb->dev = skb->dev;
port->agg_skb->protocol = htons(ETH_P_MAP);
port->agg_count = 1;
ktime_get_real_ts64(&port->agg_time);
@@ -1167,8 +1474,7 @@ new_packet:
goto new_packet;
}
dest_buff = skb_put(port->agg_skb, skb->len);
memcpy(dest_buff, skb->data, skb->len);
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_count++;
dev_kfree_skb_any(skb);
@@ -1182,14 +1488,51 @@ schedule:
spin_unlock_irqrestore(&port->agg_lock, flags);
}
void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
u8 count, u8 features, u32 time)
{
unsigned long irq_flags;
spin_lock_irqsave(&port->agg_lock, irq_flags);
port->egress_agg_params.agg_count = count;
port->egress_agg_params.agg_time = time;
port->egress_agg_params.agg_size = size;
port->egress_agg_params.agg_features = features;
rmnet_free_agg_pages(port);
/* This effectively disables recycling in case the UL aggregation
* size is lesser than PAGE_SIZE.
*/
if (size < PAGE_SIZE)
goto done;
port->agg_size_order = get_order(size);
size = PAGE_SIZE << port->agg_size_order;
size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
port->egress_agg_params.agg_size = size;
if (port->egress_agg_params.agg_features == RMNET_PAGE_RECYCLE)
rmnet_alloc_agg_pages(port);
done:
spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}
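A worked example of the sizing math above, assuming 4 KiB pages: requesting size = 8192 gives agg_size_order = get_order(8192) = 1, so each aggregation buffer is an order-1 allocation of 8192 bytes; the final agg_size is then 8192 minus SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) reserved for build_skb(), a few hundred bytes on 64-bit builds (the exact overhead is architecture-dependent). The PAGE_SIZE - 1 default used by rmnet_map_tx_aggregate_init() below deliberately fails the size < PAGE_SIZE check, so no pages are pre-allocated and recycling stays off until userspace reconfigures it.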
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
port->egress_agg_params.agg_size = 8192;
port->egress_agg_params.agg_count = 20;
port->egress_agg_params.agg_time = 3000000;
spin_lock_init(&port->agg_lock);
INIT_LIST_HEAD(&port->agg_list);
/* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
* This is done to reduce memory usage in cases where
* UL aggregation is disabled.
* Additionally, the features flag is also set to 0.
*/
rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 0, 3000000);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
@@ -1213,6 +1556,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
port->agg_state = 0;
}
rmnet_free_agg_pages(port);
spin_unlock_irqrestore(&port->agg_lock, flags);
}
@@ -1224,7 +1568,7 @@ void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
port = rmnet_get_port(qmap_skb->dev);
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
if (port && (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION)) {
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_skb) {
agg_skb = port->agg_skb;

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,10 +30,16 @@
#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
RMNET_INGRESS_FORMAT_DL_MARKER_V2)
/* UL Packet prioritization */
#define RMNET_EGRESS_FORMAT_PRIORITY BIT(28)
/* Power save feature */
#define RMNET_INGRESS_FORMAT_PS BIT(27)
#define RMNET_FORMAT_PS_NOTIF BIT(26)
/* UL Aggregation parameters */
#define RMNET_PAGE_RECYCLE BIT(0)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,9 +16,11 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define CONFIG_QTI_QMI_RMNET 1
void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
#ifdef CONFIG_QTI_QMI_RMNET
void *rmnet_get_qmi_pt(void *port);
void *rmnet_get_qos_pt(struct net_device *dev);
void *rmnet_get_rmnet_port(struct net_device *dev);
@@ -31,4 +33,68 @@ void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
struct net_device *rmnet_get_real_dev(void *port);
int rmnet_get_dlmarker_info(void *port);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{
return NULL;
}
static inline void *rmnet_get_qos_pt(struct net_device *dev)
{
return NULL;
}
static inline void *rmnet_get_rmnet_port(struct net_device *dev)
{
return NULL;
}
static inline struct net_device *rmnet_get_rmnet_dev(void *port,
u8 mux_id)
{
return NULL;
}
static inline void rmnet_reset_qmi_pt(void *port)
{
}
static inline void rmnet_init_qmi_pt(void *port, void *qmi)
{
}
static inline void rmnet_enable_all_flows(void *port)
{
}
static inline bool rmnet_all_flows_enabled(void *port)
{
return true;
}
static inline void rmnet_set_port_format(void *port)
{
}
static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
}
static inline int rmnet_get_powersave_notif(void *port)
{
return 0;
}
static inline struct net_device *rmnet_get_real_dev(void *port)
{
return NULL;
}
static inline int rmnet_get_dlmarker_info(void *port)
{
return 0;
}
#endif /* CONFIG_QTI_QMI_RMNET */
#endif /*_RMNET_QMI_H*/
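
The #else stubs above let QMI-aware call sites compile unchanged when CONFIG_QTI_QMI_RMNET is unset. A hypothetical caller showing the pattern:

/* Hypothetical caller: with the stubs in effect, both calls collapse
 * to their defaults (NULL qmi pointer, DL marker reported absent).
 */
static bool example_dl_marker_active(void *port)
{
        if (!rmnet_get_qmi_pt(port))
                return false;

        return rmnet_get_dlmarker_info(port) != 0;
}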

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
#include "rmnet_genl.h"
#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
@@ -127,8 +128,7 @@ static void rmnet_vnd_uninit(struct net_device *dev)
qos = priv->qos_info;
RCU_INIT_POINTER(priv->qos_info, NULL);
synchronize_rcu();
qmi_rmnet_qos_exit(dev, qos);
qmi_rmnet_qos_exit_pre(qos);
}
static void rmnet_get_stats64(struct net_device *dev,
@@ -167,11 +167,23 @@ static u16 rmnet_vnd_select_queue(struct net_device *dev,
struct net_device *sb_dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
u64 boost_period = 0;
int boost_trigger = 0;
int txq = 0;
if (priv->real_dev)
txq = qmi_rmnet_get_queue(dev, skb);
if (rmnet_core_userspace_connected) {
rmnet_update_pid_and_check_boost(task_pid_nr(current),
skb->len,
&boost_trigger,
&boost_period);
if (boost_trigger)
(void) boost_period;
}
return (txq < dev->real_num_tx_queues) ? txq : 0;
}
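
The final clamp guards against a stale index: qmi_rmnet_get_queue() derives the queue from flow state, which can briefly exceed the device's live queue count. A condensed illustration with invented values:

/* If the flow maps to queue 5 but the device now exposes only 4 TX
 * queues, returning 5 from ndo_select_queue() would be out of range;
 * the fallback picks queue 0 instead.
 */
u16 txq = 5;                            /* from qmi_rmnet_get_queue() */

if (txq >= dev->real_num_tx_queues)     /* real_num_tx_queues == 4 */
        txq = 0;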
@@ -219,6 +231,7 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Coalescing packets over VEID1",
"Coalescing packets over VEID2",
"Coalescing packets over VEID3",
"Uplink priority packets",
};
static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -234,6 +247,8 @@ static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"DL header total pkts received",
"DL trailer last seen sequence",
"DL trailer pkts received",
"UL agg reuse",
"UL agg alloc",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -284,6 +299,7 @@ static int rmnet_stats_reset(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_port_priv_stats *stp;
struct rmnet_priv_stats *st;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
@@ -293,6 +309,11 @@ static int rmnet_stats_reset(struct net_device *dev)
stp = &port->stats;
memset(stp, 0, sizeof(*stp));
st = &priv->stats;
memset(st, 0, sizeof(*st));
return 0;
}

View File

@@ -1,4 +1,4 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -259,7 +259,8 @@ out:
return ret;
}
static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
int dl_marker)
{
struct wda_qmi_data *data = container_of(wda_handle,
struct wda_qmi_data, handle);
@@ -289,7 +290,8 @@ static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
req->ep_id.ep_type = data->svc.ep_type;
req->ep_id.iface_id = data->svc.iface_id;
req->req_data_cfg_valid = 1;
req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
req->req_data_cfg = dl_marker ? WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 :
WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01;
ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
@@ -321,11 +323,22 @@ static void wda_svc_config(struct work_struct *work)
struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
svc_arrive);
struct qmi_info *qmi;
int rc;
int rc, dl_marker = 0;
while (!rtnl_trylock()) {
if (!data->restart_state)
cond_resched();
else
return;
}
dl_marker = rmnet_get_dlmarker_info(data->rmnet_port);
rtnl_unlock();
if (data->restart_state == 1)
return;
rc = wda_set_powersave_config_req(&data->handle);
rc = wda_set_powersave_config_req(&data->handle, dl_marker);
if (rc < 0) {
pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
return;
@@ -339,6 +352,7 @@ static void wda_svc_config(struct work_struct *work)
else
return;
}
qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
if (!qmi) {
rtnl_unlock();
@@ -353,7 +367,8 @@ static void wda_svc_config(struct work_struct *work)
rtnl_unlock();
pr_info("Connection established with the WDA Service\n");
pr_info("Connection established with the WDA Service, DL Marker %s\n",
dl_marker ? "enabled" : "disabled");
}
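
The trylock/cond_resched() loop appears twice in wda_svc_config(); a hedged refactoring sketch of the pattern (helper name is ours):

#include <linux/rtnetlink.h>
#include <linux/sched.h>

/* Sketch only: take the RTNL lock cooperatively, but bail out the
 * moment a modem restart begins so this worker never stalls subsystem
 * teardown. Returns true with RTNL held, false otherwise.
 */
static bool wda_rtnl_lock_unless_restart(struct wda_qmi_data *data)
{
        while (!rtnl_trylock()) {
                if (data->restart_state)
                        return false;
                cond_resched();
        }

        return true;
}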
static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)