core: Rmnet initial commit

Initial commit of the rmnet_core net device driver in DLKM form
in datarmnet. This requires the in-kernel rmnet driver to be
disabled and rmnet_core.ko to be loaded before any dependent modules.

CRs-Fixed: 2558810
Change-Id: I742e85033fa0999bf9069d43ce73ab9a622a8388
Acked-by: Raul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Subash Abhinov Kasiviswanathan
2019-12-09 23:20:02 -07:00
parent f226883d3b
commit 08d4972b2a
28 changed files with 9731 additions and 0 deletions

core/Android.mk Normal file

@@ -0,0 +1,26 @@
ifneq ($(TARGET_PRODUCT),qssi)
RMNET_CORE_DLKM_PLATFORMS_LIST := lahaina
ifeq ($(call is-board-platform-in-list, $(RMNET_CORE_DLKM_PLATFORMS_LIST)),true)
#Make file to create RMNET_CORE DLKM
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
LOCAL_CLANG :=true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_core.ko
LOCAL_SRC_FILES := rmnet_config.c rmnet_descriptor.c rmnet_handlers.c rmnet_map_command.c rmnet_map_data.c rmnet_vnd.c dfc_qmi.c qmi_rmnet.c wda_qmi.c
RMNET_BLD_DIR := ../../vendor/qcom/opensource/datarmnet/core
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
KBUILD_OPTIONS := $(RMNET_BLD_DIR)
$(warning $(DLKM_DIR))
include $(DLKM_DIR)/AndroidKernelModule.mk
endif #End of Check for target
endif #End of Check for qssi target

core/Kbuild Normal file

@@ -0,0 +1,5 @@
obj-m += rmnet_core.o
rmnet_core-y := rmnet_config.o rmnet_handlers.o rmnet_descriptor.o rmnet_map_command.o rmnet_map_data.o rmnet_vnd.o \
	qmi_rmnet.o wda_qmi.o dfc_qmi.o

core/Kconfig Normal file

@@ -0,0 +1,12 @@
#
# RMNET MAP driver
#
menuconfig RMNET_CORE
default m
select GRO_CELLS
---help---
If you select this, you will enable the RMNET module which is used
for handling data in the multiplexing and aggregation protocol (MAP)
format in the embedded data path. RMNET devices can be attached to
any IP mode physical device.

core/Makefile Normal file

@@ -0,0 +1,13 @@
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
KBUILD_OPTIONS := RMNET_CORE_ROOT=$(PWD)
KBUILD_OPTIONS += MODNAME?=rmnet_core
all:
$(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules $(KBUILD_OPTIONS)
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
clean:
$(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean

core/dfc.h Normal file

@@ -0,0 +1,254 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dfc
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#define TRACE_INCLUDE_FILE dfc
#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DFC_H
#include <linux/tracepoint.h>
TRACE_EVENT(dfc_qmi_tc,
TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
int qlen, u32 tcm_handle, int enable),
TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),
TP_STRUCT__entry(
__string(dev_name, name)
__field(u8, bid)
__field(u32, fid)
__field(u32, grant)
__field(int, qlen)
__field(u32, tcm_handle)
__field(int, enable)
),
TP_fast_assign(
__assign_str(dev_name, name);
__entry->bid = bearer_id;
__entry->fid = flow_id;
__entry->grant = grant;
__entry->qlen = qlen;
__entry->tcm_handle = tcm_handle;
__entry->enable = enable;
),
TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
__get_str(dev_name),
__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
__entry->tcm_handle,
__entry->enable ? "enable" : "disable")
);
TRACE_EVENT(dfc_flow_ind,
TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
u16 seq_num, u8 ack_req, u32 ancillary),
TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req,
ancillary),
TP_STRUCT__entry(
__field(int, src)
__field(int, idx)
__field(u8, mid)
__field(u8, bid)
__field(u32, grant)
__field(u16, seq)
__field(u8, ack_req)
__field(u32, ancillary)
),
TP_fast_assign(
__entry->src = src;
__entry->idx = idx;
__entry->mid = mux_id;
__entry->bid = bearer_id;
__entry->grant = grant;
__entry->seq = seq_num;
__entry->ack_req = ack_req;
__entry->ancillary = ancillary;
),
TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u",
__entry->src, __entry->idx, __entry->mid, __entry->bid,
__entry->grant, __entry->seq, __entry->ack_req,
__entry->ancillary)
);
TRACE_EVENT(dfc_flow_check,
TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),
TP_ARGS(name, bearer_id, len, grant),
TP_STRUCT__entry(
__string(dev_name, name)
__field(u8, bearer_id)
__field(unsigned int, len)
__field(u32, grant)
),
TP_fast_assign(
__assign_str(dev_name, name)
__entry->bearer_id = bearer_id;
__entry->len = len;
__entry->grant = grant;
),
TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
__get_str(dev_name),
__entry->bearer_id, __entry->len, __entry->grant)
);
TRACE_EVENT(dfc_flow_info,
TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type,
u32 handle, int add),
TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add),
TP_STRUCT__entry(
__string(dev_name, name)
__field(u8, bid)
__field(u32, fid)
__field(int, ip)
__field(u32, handle)
__field(int, action)
),
TP_fast_assign(
__assign_str(dev_name, name)
__entry->bid = bearer_id;
__entry->fid = flow_id;
__entry->ip = ip_type;
__entry->handle = handle;
__entry->action = add;
),
TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
__entry->action ? "add flow" : "delete flow",
__get_str(dev_name),
__entry->bid, __entry->fid, __entry->ip, __entry->handle)
);
TRACE_EVENT(dfc_client_state_up,
TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface),
TP_ARGS(idx, instance, ep_type, iface),
TP_STRUCT__entry(
__field(int, idx)
__field(u32, instance)
__field(u32, ep_type)
__field(u32, iface)
),
TP_fast_assign(
__entry->idx = idx;
__entry->instance = instance;
__entry->ep_type = ep_type;
__entry->iface = iface;
),
TP_printk("Client[%d]: Connection established with DFC Service "
"instance=%u ep_type=%u iface_id=%u",
__entry->idx, __entry->instance,
__entry->ep_type, __entry->iface)
);
TRACE_EVENT(dfc_client_state_down,
TP_PROTO(int idx, int from_cb),
TP_ARGS(idx, from_cb),
TP_STRUCT__entry(
__field(int, idx)
__field(int, from_cb)
),
TP_fast_assign(
__entry->idx = idx;
__entry->from_cb = from_cb;
),
TP_printk("Client[%d]: Connection with DFC service lost. "
"Exit by callback %d",
__entry->idx, __entry->from_cb)
);
TRACE_EVENT(dfc_qmap_cmd,
TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran),
TP_ARGS(mux_id, bearer_id, seq_num, type, tran),
TP_STRUCT__entry(
__field(u8, mid)
__field(u8, bid)
__field(u16, seq)
__field(u8, type)
__field(u32, tran)
),
TP_fast_assign(
__entry->mid = mux_id;
__entry->bid = bearer_id;
__entry->seq = seq_num;
__entry->type = type;
__entry->tran = tran;
),
TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u",
__entry->mid, __entry->bid, __entry->seq,
__entry->type, __entry->tran)
);
TRACE_EVENT(dfc_tx_link_status_ind,
TP_PROTO(int src, int idx, u8 status, u8 mux_id, u8 bearer_id),
TP_ARGS(src, idx, status, mux_id, bearer_id),
TP_STRUCT__entry(
__field(int, src)
__field(int, idx)
__field(u8, status)
__field(u8, mid)
__field(u8, bid)
),
TP_fast_assign(
__entry->src = src;
__entry->idx = idx;
__entry->status = status;
__entry->mid = mux_id;
__entry->bid = bearer_id;
),
TP_printk("src=%d [%d]: status=%u mux_id=%u bearer_id=%u",
__entry->src, __entry->idx, __entry->status,
__entry->mid, __entry->bid)
);
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
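
The TRACE_EVENT() definitions above expand into trace_dfc_*() emitters that the flow-control code calls; dfc_qmap.c below includes <trace/events/dfc.h>, resolved through the TRACE_INCLUDE_PATH set at the top of this file. A minimal sketch of emitting the dfc_qmi_tc event, with the surrounding values assumed purely for illustration:

/* Sketch only: everything except the tracepoint signature is assumed. */
#include <linux/netdevice.h>
#include <trace/events/dfc.h>

static void example_trace_flow_enable(struct net_device *dev, u8 bearer_id,
				      u32 flow_id, u32 grant, u32 tcm_handle)
{
	/* qlen would normally be read from the traffic-control queue */
	trace_dfc_qmi_tc(dev->name, bearer_id, flow_id, grant,
			 0 /* qlen */, tcm_handle, 1 /* enable */);
}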

core/dfc_defs.h Normal file

@@ -0,0 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DFC_DEFS_H
#define _DFC_DEFS_H
#include <linux/soc/qcom/qmi.h>
#include "qmi_rmnet_i.h"
#define DFC_ACK_TYPE_DISABLE 1
#define DFC_ACK_TYPE_THRESHOLD 2
#define DFC_MASK_TCP_BIDIR 0x1
#define DFC_MASK_RAT_SWITCH 0x2
#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
#define DFC_MAX_QOS_ID_V01 2
struct dfc_qmi_data {
void *rmnet_port;
struct workqueue_struct *dfc_wq;
struct work_struct svc_arrive;
struct qmi_handle handle;
struct sockaddr_qrtr ssctl;
struct svc_info svc;
struct work_struct qmi_ind_work;
struct list_head qmi_ind_q;
spinlock_t qmi_ind_lock;
int index;
int restart_state;
};
enum dfc_ip_type_enum_v01 {
DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
DFC_IPV4_TYPE_V01 = 0x4,
DFC_IPV6_TYPE_V01 = 0x6,
DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};
struct dfc_qos_id_type_v01 {
u32 qos_id;
enum dfc_ip_type_enum_v01 ip_type;
};
struct dfc_flow_status_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
u32 num_bytes;
u16 seq_num;
u8 qos_ids_len;
struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
};
struct dfc_ancillary_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
u32 reserved;
};
struct dfc_flow_status_ind_msg_v01 {
u8 flow_status_valid;
u8 flow_status_len;
struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
u8 eod_ack_reqd_valid;
u8 eod_ack_reqd;
u8 ancillary_info_valid;
u8 ancillary_info_len;
struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};
struct dfc_bearer_info_type_v01 {
u8 subs_id;
u8 mux_id;
u8 bearer_id;
enum dfc_ip_type_enum_v01 ip_type;
};
struct dfc_tx_link_status_ind_msg_v01 {
u8 tx_status;
u8 bearer_info_valid;
u8 bearer_info_len;
struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
};
void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
struct dfc_flow_status_ind_msg_v01 *ind);
void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
struct dfc_tx_link_status_ind_msg_v01 *ind);
#endif /* _DFC_DEFS_H */
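
The reserved field of the ancillary info carries the DFC_MASK_* bits defined above and is tested with DFC_IS_TCP_BIDIR()/DFC_IS_RAT_SWITCH(). A short sketch of a consumer, assuming an already-received and length-checked indication:

/* Sketch only: the indication is assumed to be validated by the caller. */
#include <linux/printk.h>
#include "dfc_defs.h"

static void example_check_ancillary(struct dfc_flow_status_ind_msg_v01 *ind)
{
	u32 anc;

	if (!ind->ancillary_info_valid || !ind->ancillary_info_len)
		return;

	anc = ind->ancillary_info[0].reserved;
	if (DFC_IS_TCP_BIDIR(anc))
		pr_debug("bearer carries bidirectional TCP traffic\n");
	if (DFC_IS_RAT_SWITCH(anc))
		pr_debug("grant update caused by a RAT switch\n");
}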

core/dfc_qmap.c Normal file

@@ -0,0 +1,513 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#include <net/pkt_sched.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
#include <trace/events/dfc.h>
#include <soc/qcom/rmnet_ctl.h>
#include "dfc_defs.h"
#define QMAP_DFC_VER 1
#define QMAP_CMD_DONE -1
#define QMAP_CMD_REQUEST 0
#define QMAP_CMD_ACK 1
#define QMAP_CMD_UNSUPPORTED 2
#define QMAP_CMD_INVALID 3
#define QMAP_DFC_CONFIG 10
#define QMAP_DFC_IND 11
#define QMAP_DFC_QUERY 12
#define QMAP_DFC_END_MARKER 13
struct qmap_hdr {
u8 cd_pad;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
struct qmap_cmd_hdr {
u8 pad_len:6;
u8 reserved_bit:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
u8 cmd_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
__be32 tx_id;
} __aligned(1);
struct qmap_dfc_config {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 cmd_id;
u8 reserved;
u8 tx_info:1;
u8 reserved2:7;
__be32 ep_type;
__be32 iface_id;
u32 reserved3;
} __aligned(1);
struct qmap_dfc_ind {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved;
__be16 seq_num;
u8 reserved2;
u8 tx_info_valid:1;
u8 tx_info:1;
u8 reserved3:6;
u8 bearer_id;
u8 tcp_bidir:1;
u8 bearer_status:3;
u8 reserved4:4;
__be32 grant;
u32 reserved5;
u32 reserved6;
} __aligned(1);
struct qmap_dfc_query {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved;
u8 bearer_id;
u8 reserved2;
u32 reserved3;
} __aligned(1);
struct qmap_dfc_query_resp {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 bearer_id;
u8 tcp_bidir:1;
u8 reserved:7;
u8 invalid:1;
u8 reserved2:7;
__be32 grant;
u32 reserved3;
u32 reserved4;
} __aligned(1);
struct qmap_dfc_end_marker_req {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved;
u8 bearer_id;
u8 reserved2;
u16 reserved3;
__be16 seq_num;
u32 reserved4;
} __aligned(1);
struct qmap_dfc_end_marker_cnf {
struct qmap_cmd_hdr hdr;
u8 cmd_ver;
u8 reserved;
u8 bearer_id;
u8 reserved2;
u16 reserved3;
__be16 seq_num;
u32 reserved4;
} __aligned(1);
static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;
static atomic_t qmap_txid;
static void *rmnet_ctl_handle;
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
u8 bearer_id, u16 seq, u32 tx_id);
static void dfc_qmap_send_cmd(struct sk_buff *skb)
{
trace_dfc_qmap(skb->data, skb->len, false);
if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) {
pr_err("Failed to send to rmnet ctl\n");
kfree_skb(skb);
}
}
static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
cmd = (struct qmap_cmd_hdr *)skb->data;
skb->protocol = htons(ETH_P_MAP);
skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
trace_dfc_qmap(skb->data, skb->len, false);
dev_queue_xmit(skb);
}
static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
struct sk_buff *skb)
{
struct qmap_dfc_ind *cmd;
if (skb->len < sizeof(struct qmap_dfc_ind))
return QMAP_CMD_INVALID;
cmd = (struct qmap_dfc_ind *)skb->data;
if (cmd->tx_info_valid) {
memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
qmap_tx_ind.tx_status = cmd->tx_info;
qmap_tx_ind.bearer_info_valid = 1;
qmap_tx_ind.bearer_info_len = 1;
qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;
dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);
/* Ignore grant since it is always 0 */
goto done;
}
memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
qmap_flow_ind.flow_status_valid = 1;
qmap_flow_ind.flow_status_len = 1;
qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
if (cmd->tcp_bidir) {
qmap_flow_ind.ancillary_info_valid = 1;
qmap_flow_ind.ancillary_info_len = 1;
qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
}
dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
done:
return QMAP_CMD_ACK;
}
static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
struct sk_buff *skb)
{
struct qmap_dfc_query_resp *cmd;
if (skb->len < sizeof(struct qmap_dfc_query_resp))
return QMAP_CMD_DONE;
cmd = (struct qmap_dfc_query_resp *)skb->data;
if (cmd->invalid)
return QMAP_CMD_DONE;
memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
qmap_flow_ind.flow_status_valid = 1;
qmap_flow_ind.flow_status_len = 1;
qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
if (cmd->tcp_bidir) {
qmap_flow_ind.ancillary_info_valid = 1;
qmap_flow_ind.ancillary_info_len = 1;
qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
}
dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
return QMAP_CMD_DONE;
}
static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
u8 bearer_id, u16 seq_num, u32 tx_id)
{
struct net_device *dev;
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
if (!dev)
return;
qos = (struct qos_info *)rmnet_get_qos_pt(dev);
if (!qos)
return;
spin_lock_bh(&qos->qos_lock);
bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
bearer->ack_req = 1;
bearer->ack_txid = tx_id;
} else {
dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
}
spin_unlock_bh(&qos->qos_lock);
}
static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
struct sk_buff *skb)
{
struct qmap_dfc_end_marker_req *cmd;
if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
return QMAP_CMD_INVALID;
cmd = (struct qmap_dfc_end_marker_req *)skb->data;
dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));
return QMAP_CMD_DONE;
}
static void dfc_qmap_cmd_handler(struct sk_buff *skb)
{
struct qmap_cmd_hdr *cmd;
struct dfc_qmi_data *dfc;
int rc = QMAP_CMD_DONE;
if (!skb)
return;
trace_dfc_qmap(skb->data, skb->len, true);
if (skb->len < sizeof(struct qmap_cmd_hdr))
goto free_skb;
cmd = (struct qmap_cmd_hdr *)skb->data;
if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
goto free_skb;
if (cmd->cmd_name == QMAP_DFC_QUERY) {
if (cmd->cmd_type != QMAP_CMD_ACK)
goto free_skb;
} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
goto free_skb;
}
rcu_read_lock();
dfc = rcu_dereference(qmap_dfc_data);
if (!dfc || READ_ONCE(dfc->restart_state)) {
rcu_read_unlock();
goto free_skb;
}
switch (cmd->cmd_name) {
case QMAP_DFC_IND:
rc = dfc_qmap_handle_ind(dfc, skb);
qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
break;
case QMAP_DFC_QUERY:
rc = dfc_qmap_handle_query_resp(dfc, skb);
break;
case QMAP_DFC_END_MARKER:
rc = dfc_qmap_handle_end_marker_req(dfc, skb);
break;
default:
rc = QMAP_CMD_UNSUPPORTED;
}
/* Send ack */
if (rc != QMAP_CMD_DONE) {
cmd->cmd_type = rc;
if (cmd->cmd_name == QMAP_DFC_IND)
dfc_qmap_send_inband_ack(dfc, skb);
else
dfc_qmap_send_cmd(skb);
rcu_read_unlock();
return;
}
rcu_read_unlock();
free_skb:
kfree_skb(skb);
}
static void dfc_qmap_send_config(struct dfc_qmi_data *data)
{
struct sk_buff *skb;
struct qmap_dfc_config *dfc_config;
unsigned int len = sizeof(struct qmap_dfc_config);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_MAP);
dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
memset(dfc_config, 0, len);
dfc_config->hdr.cd_bit = 1;
dfc_config->hdr.mux_id = 0;
dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
dfc_config->cmd_ver = QMAP_DFC_VER;
dfc_config->cmd_id = QMAP_DFC_IND;
dfc_config->tx_info = 1;
dfc_config->ep_type = htonl(data->svc.ep_type);
dfc_config->iface_id = htonl(data->svc.iface_id);
dfc_qmap_send_cmd(skb);
}
static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
{
struct sk_buff *skb;
struct qmap_dfc_query *dfc_query;
unsigned int len = sizeof(struct qmap_dfc_query);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_MAP);
dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
memset(dfc_query, 0, len);
dfc_query->hdr.cd_bit = 1;
dfc_query->hdr.mux_id = mux_id;
dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
dfc_query->cmd_ver = QMAP_DFC_VER;
dfc_query->bearer_id = bearer_id;
dfc_qmap_send_cmd(skb);
}
static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
u8 bearer_id, u16 seq, u32 tx_id)
{
struct sk_buff *skb;
struct qmap_dfc_end_marker_cnf *em_cnf;
unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
memset(em_cnf, 0, len);
em_cnf->hdr.cd_bit = 1;
em_cnf->hdr.mux_id = qos->mux_id;
em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
em_cnf->hdr.tx_id = htonl(tx_id);
em_cnf->cmd_ver = QMAP_DFC_VER;
em_cnf->bearer_id = bearer_id;
em_cnf->seq_num = htons(seq);
skb->protocol = htons(ETH_P_MAP);
skb->dev = qos->real_dev;
/* This cmd needs to be sent in-band */
trace_dfc_qmap(skb->data, skb->len, false);
rmnet_map_tx_qmap_cmd(skb);
}
void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
struct rmnet_bearer_map *bearer;
if (type == DFC_ACK_TYPE_DISABLE) {
bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
if (bearer)
dfc_qmap_send_end_marker_cnf(qos, bearer_id,
seq, bearer->ack_txid);
} else if (type == DFC_ACK_TYPE_THRESHOLD) {
dfc_qmap_send_query(qos->mux_id, bearer_id);
}
}
static struct rmnet_ctl_client_hooks cb = {
.ctl_dl_client_hook = dfc_qmap_cmd_handler,
};
int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi)
{
struct dfc_qmi_data *data;
if (!port || !qmi)
return -EINVAL;
data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->rmnet_port = port;
data->index = index;
memcpy(&data->svc, psvc, sizeof(data->svc));
qmi->dfc_clients[index] = (void *)data;
rcu_assign_pointer(qmap_dfc_data, data);
atomic_set(&qmap_txid, 0);
rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
if (!rmnet_ctl_handle)
pr_err("Failed to register with rmnet ctl\n");
trace_dfc_client_state_up(data->index, data->svc.instance,
data->svc.ep_type, data->svc.iface_id);
pr_info("DFC QMAP init\n");
dfc_qmap_send_config(data);
return 0;
}
void dfc_qmap_client_exit(void *dfc_data)
{
struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
if (!data) {
pr_err("%s() data is null\n", __func__);
return;
}
trace_dfc_client_state_down(data->index, 0);
rmnet_ctl_unregister_client(rmnet_ctl_handle);
WRITE_ONCE(data->restart_state, 1);
RCU_INIT_POINTER(qmap_dfc_data, NULL);
synchronize_rcu();
kfree(data);
pr_info("DFC QMAP exit\n");
}

core/dfc_qmi.c Normal file (1596 lines)
File diff suppressed because it is too large

core/qmi_rmnet.c Normal file (1032 lines)
File diff suppressed because it is too large

core/qmi_rmnet.h Normal file

@@ -0,0 +1,146 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _QMI_RMNET_H
#define _QMI_RMNET_H
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
struct qmi_rmnet_ps_ind {
void (*ps_on_handler)(void *);
void (*ps_off_handler)(void *);
struct list_head list;
};
#ifdef CONFIG_QTI_QMI_RMNET
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
}
static inline void
qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
{
}
static inline void
qmi_rmnet_enable_all_flows(struct net_device *dev)
{
}
static inline bool
qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
return true;
}
#endif
#ifdef CONFIG_QTI_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
#else
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
{
return NULL;
}
static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
{
}
static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len)
{
}
static inline int qmi_rmnet_get_queue(struct net_device *dev,
struct sk_buff *skb)
{
return 0;
}
#endif
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable);
void qmi_rmnet_work_init(void *port);
void qmi_rmnet_work_exit(void *port);
void qmi_rmnet_work_maybe_restart(void *port);
void qmi_rmnet_set_dl_msg_active(void *port);
bool qmi_rmnet_ignore_grant(void *port);
int qmi_rmnet_ps_ind_register(void *port,
struct qmi_rmnet_ps_ind *ps_ind);
int qmi_rmnet_ps_ind_deregister(void *port,
struct qmi_rmnet_ps_ind *ps_ind);
void qmi_rmnet_ps_off_notify(void *port);
void qmi_rmnet_ps_on_notify(void *port);
#else
static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
return 0;
}
static inline void qmi_rmnet_work_init(void *port)
{
}
static inline void qmi_rmnet_work_exit(void *port)
{
}
static inline void qmi_rmnet_work_maybe_restart(void *port)
{
}
static inline void qmi_rmnet_set_dl_msg_active(void *port)
{
}
static inline bool qmi_rmnet_ignore_grant(void *port)
{
return false;
}
static inline int qmi_rmnet_ps_ind_register(void *port,
struct qmi_rmnet_ps_ind *ps_ind)
{
return 0;
}
static inline int qmi_rmnet_ps_ind_deregister(void *port,
struct qmi_rmnet_ps_ind *ps_ind)
{
return 0;
}
static inline void qmi_rmnet_ps_off_notify(void *port)
{
}
static inline void qmi_rmnet_ps_on_notify(void *port)
{
}
#endif
#endif /*_QMI_RMNET_H*/
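
These helpers are intended to be called from the rmnet virtual device's data path (rmnet_vnd.c in this commit): qmi_rmnet_get_queue() from queue selection and qmi_rmnet_burst_fc_check() from transmit. A minimal transmit-side sketch, with everything outside this header assumed for illustration:

/* Sketch only: MAP encapsulation and handoff to the real device elided. */
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include "qmi_rmnet.h"

static netdev_tx_t example_vnd_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? 6 : 4;

	/* Charge the packet against the bearer grant; may stop the queue */
	qmi_rmnet_burst_fc_check(dev, ip_type, skb->mark, skb->len);

	return NETDEV_TX_OK;
}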

core/qmi_rmnet_i.h Normal file

@@ -0,0 +1,203 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_QMI_I_H
#define _RMNET_QMI_I_H
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#define IP_VER_4 4
#define IP_VER_6 6
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
#define DEFAULT_GRANT 1
#define DFC_MAX_BEARERS_V01 16
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
struct rmnet_flow_map {
struct list_head list;
u8 bearer_id;
u32 flow_id;
int ip_type;
u32 tcm_handle;
};
struct rmnet_bearer_map {
struct list_head list;
u8 bearer_id;
int flow_ref;
u32 grant_size;
u32 grant_thresh;
u16 seq;
u8 ack_req;
u32 last_grant;
u16 last_seq;
bool tcp_bidir;
bool rat_switch;
bool tx_off;
};
struct svc_info {
u32 instance;
u32 ep_type;
u32 iface_id;
};
struct qos_info {
u8 mux_id;
struct net_device *real_dev;
struct list_head flow_head;
struct list_head bearer_head;
u32 default_grant;
u32 tran_num;
spinlock_t qos_lock;
};
struct flow_info {
struct net_device *dev;
struct rmnet_flow_map *itm;
};
struct qmi_info {
int flag;
void *wda_client;
void *wda_pending;
void *dfc_clients[MAX_CLIENT_NUM];
void *dfc_pending[MAX_CLIENT_NUM];
unsigned long ps_work_active;
bool ps_enabled;
bool dl_msg_active;
bool ps_ignore_grant;
};
enum data_ep_type_enum_v01 {
DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN,
DATA_EP_TYPE_RESERVED_V01 = 0x00,
DATA_EP_TYPE_HSIC_V01 = 0x01,
DATA_EP_TYPE_HSUSB_V01 = 0x02,
DATA_EP_TYPE_PCIE_V01 = 0x03,
DATA_EP_TYPE_EMBEDDED_V01 = 0x04,
DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX
};
struct data_ep_id_type_v01 {
enum data_ep_type_enum_v01 ep_type;
u32 iface_id;
};
extern struct qmi_elem_info data_ep_id_type_v01_ei[];
void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
#ifdef CONFIG_QTI_QMI_DFC
struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
u32 flow_id, int ip_type);
struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);
unsigned int qmi_rmnet_grant_per(unsigned int grant);
int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi);
void dfc_qmi_client_exit(void *dfc_data);
void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
void dfc_qmi_query_flow(void *dfc_data);
int dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
struct qos_info *qos);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
uint32_t flow_id, int ip_type)
{
return NULL;
}
static inline struct rmnet_bearer_map *
qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
{
return NULL;
}
static inline int
dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
struct qmi_info *qmi)
{
return -EINVAL;
}
static inline void dfc_qmi_client_exit(void *dfc_data)
{
}
static inline void
dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
int ip_type, u32 mark, unsigned int len)
{
}
static inline void
dfc_qmi_query_flow(void *dfc_data)
{
}
static inline int
dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
struct qos_info *qos)
{
return 0;
}
#endif
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
void wda_qmi_client_exit(void *wda_data);
int wda_set_powersave_mode(void *wda_data, u8 enable);
void qmi_rmnet_flush_ps_wq(void);
#else
static inline int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
return -EINVAL;
}
static inline void wda_qmi_client_exit(void *wda_data)
{
}
static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
{
return -EINVAL;
}
static inline void qmi_rmnet_flush_ps_wq(void)
{
}
#endif
#endif /*_RMNET_QMI_I_H*/
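
qmi_rmnet_get_bearer_map() is implemented in qmi_rmnet.c, whose diff is suppressed above. A sketch of what such a lookup looks like over the structures defined here, assuming the caller holds qos->qos_lock (as dfc_qmap.c does around its call):

/* Sketch only: caller must hold qos->qos_lock. */
#include <linux/list.h>
#include "qmi_rmnet_i.h"

static struct rmnet_bearer_map *
example_get_bearer(struct qos_info *qos, u8 bearer_id)
{
	struct rmnet_bearer_map *itm;

	list_for_each_entry(itm, &qos->bearer_head, list) {
		if (itm->bearer_id == bearer_id)
			return itm;
	}

	return NULL;
}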

core/rmnet_config.c Normal file

@@ -0,0 +1,738 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET configuration engine
*
*/
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#define CONFIG_QTI_QMI_RMNET 1
#define CONFIG_QTI_QMI_DFC 1
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
* paths are already called with rtnl_lock() acquired. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
* dereference here, we will need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, the real_dev->rx_handler_data is called in the TX / RX
* path. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
* rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in
* same context.
*/
/* Local Definitions and Declarations */
enum {
IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,
IFLA_RMNET_UL_AGG_PARAMS,
__IFLA_RMNET_EXT_MAX,
};
static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = {
[IFLA_RMNET_MUX_ID] = {
.type = NLA_U16
},
[IFLA_RMNET_FLAGS] = {
.len = sizeof(struct ifla_rmnet_flags)
},
[IFLA_RMNET_DFC_QOS] = {
.len = sizeof(struct tcmsg)
},
[IFLA_RMNET_UL_AGG_PARAMS] = {
.len = sizeof(struct rmnet_egress_agg_params)
},
};
int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
EXPORT_SYMBOL(rmnet_is_real_dev_registered);
/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
{
if (port->nr_rmnet_devs)
return -EINVAL;
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
rmnet_descriptor_deinit(port);
kfree(port);
netdev_rx_handler_unregister(real_dev);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
static int rmnet_register_real_device(struct net_device *real_dev)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
if (rmnet_is_real_dev_registered(real_dev))
return 0;
port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
return -ENOMEM;
port->dev = real_dev;
rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
if (rc) {
kfree(port);
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
rc = rmnet_descriptor_init(port);
if (rc) {
rmnet_descriptor_deinit(port);
return rc;
}
rmnet_map_tx_aggregate_init(port);
rmnet_map_cmd_init(port);
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
static void rmnet_unregister_bridge(struct net_device *dev,
struct rmnet_port *port)
{
struct rmnet_port *bridge_port;
struct net_device *bridge_dev;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
/* bridge slave handling */
if (!port->nr_rmnet_devs) {
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
bridge_port->bridge_ep = NULL;
bridge_port->rmnet_mode = RMNET_EPMODE_VND;
} else {
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
rmnet_unregister_real_device(bridge_dev, bridge_port);
}
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
struct net_device *real_dev;
int mode = RMNET_EPMODE_VND;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
int err = 0;
u16 mux_id;
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev)
return -ENODEV;
if (!data[IFLA_RMNET_MUX_ID])
return -EINVAL;
ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
if (!ep)
return -ENOMEM;
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
err = rmnet_register_real_device(real_dev);
if (err)
goto err0;
port = rmnet_get_port_rtnl(real_dev);
err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
if (err)
goto err1;
port->rmnet_mode = mode;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
if (data[IFLA_RMNET_FLAGS]) {
struct ifla_rmnet_flags *flags;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
data_format = flags->flags & flags->mask;
}
netdev_dbg(dev, "data format [0x%08X]\n", data_format);
port->data_format = data_format;
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
void *agg_params;
unsigned long irq_flags;
agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
spin_lock_irqsave(&port->agg_lock, irq_flags);
memcpy(&port->egress_agg_params, agg_params,
sizeof(port->egress_agg_params));
spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}
return 0;
err1:
rmnet_unregister_real_device(real_dev, port);
err0:
kfree(ep);
return err;
}
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u8 mux_id;
real_dev = priv->real_dev;
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
return;
port = rmnet_get_port_rtnl(real_dev);
mux_id = rmnet_vnd_get_mux(dev);
ep = rmnet_get_endpoint(port, mux_id);
if (ep) {
hlist_del_init_rcu(&ep->hlnode);
rmnet_unregister_bridge(dev, port);
rmnet_vnd_dellink(mux_id, port, ep);
synchronize_rcu();
kfree(ep);
}
if (!port->nr_rmnet_devs)
qmi_rmnet_qmi_exit(port->qmi_info, port);
rmnet_unregister_real_device(real_dev, port);
unregister_netdevice_queue(dev, head);
}
static void rmnet_force_unassociate_device(struct net_device *dev)
{
struct net_device *real_dev = dev;
struct hlist_node *tmp_ep;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
if (!rmnet_is_real_dev_registered(real_dev))
return;
ASSERT_RTNL();
port = rmnet_get_port_rtnl(dev);
qmi_rmnet_qmi_exit(port->qmi_info, port);
rmnet_unregister_bridge(dev, port);
hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
unregister_netdevice_queue(ep->egress_dev, &list);
rmnet_vnd_dellink(ep->mux_id, port, ep);
hlist_del_init_rcu(&ep->hlnode);
synchronize_rcu();
kfree(ep);
}
unregister_netdevice_many(&list);
rmnet_unregister_real_device(real_dev, port);
}
static int rmnet_config_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
struct net_device *dev = netdev_notifier_info_to_dev(data);
if (!dev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
netdev_dbg(dev, "Kernel unregister\n");
rmnet_force_unassociate_device(dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block rmnet_dev_notifier __read_mostly = {
.notifier_call = rmnet_config_notify_cb,
};
static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct rmnet_egress_agg_params *agg_params;
u16 mux_id;
if (!data) {
return -EINVAL;
} else {
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
return -ERANGE;
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
if (agg_params->agg_time < 3000000)
return -EINVAL;
}
}
return 0;
}
static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
hlist_del_init_rcu(&ep->hlnode);
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
ep->mux_id = mux_id;
priv->mux_id = mux_id;
}
if (data[IFLA_RMNET_FLAGS]) {
struct ifla_rmnet_flags *flags;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
port->data_format = flags->flags & flags->mask;
}
if (data[IFLA_RMNET_DFC_QOS]) {
struct tcmsg *tcm;
tcm = nla_data(data[IFLA_RMNET_DFC_QOS]);
qmi_rmnet_change_link(dev, port, tcm);
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
void *agg_params;
unsigned long irq_flags;
agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
spin_lock_irqsave(&port->agg_lock, irq_flags);
memcpy(&port->egress_agg_params, agg_params,
sizeof(port->egress_agg_params));
spin_unlock_irqrestore(&port->agg_lock, irq_flags);
}
return 0;
}
static size_t rmnet_get_size(const struct net_device *dev)
{
return
/* IFLA_RMNET_MUX_ID */
nla_total_size(2) +
/* IFLA_RMNET_FLAGS */
nla_total_size(sizeof(struct ifla_rmnet_flags)) +
/* IFLA_RMNET_DFC_QOS */
nla_total_size(sizeof(struct tcmsg)) +
/* IFLA_RMNET_UL_AGG_PARAMS */
nla_total_size(sizeof(struct rmnet_egress_agg_params));
}
static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct ifla_rmnet_flags f;
struct rmnet_port *port = NULL;
real_dev = priv->real_dev;
if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
goto nla_put_failure;
if (rmnet_is_real_dev_registered(real_dev)) {
port = rmnet_get_port_rtnl(real_dev);
f.flags = port->data_format;
} else {
f.flags = 0;
}
f.mask = ~0;
if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
goto nla_put_failure;
if (port) {
if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS,
sizeof(port->egress_agg_params),
&port->egress_agg_params))
goto nla_put_failure;
}
return 0;
nla_put_failure:
return -EMSGSIZE;
}
struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.kind = "rmnet",
.maxtype = __IFLA_RMNET_EXT_MAX,
.priv_size = sizeof(struct rmnet_priv),
.setup = rmnet_vnd_setup,
.validate = rmnet_rtnl_validate,
.newlink = rmnet_newlink,
.dellink = rmnet_dellink,
.get_size = rmnet_get_size,
.changelink = rmnet_changelink,
.policy = rmnet_policy,
.fill_info = rmnet_fill_info,
};
/* Needs either rcu_read_lock() or rtnl lock */
struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
else
return NULL;
}
EXPORT_SYMBOL(rmnet_get_port);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
struct rmnet_endpoint *ep;
hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
if (ep->mux_id == mux_id)
return ep;
}
return NULL;
}
EXPORT_SYMBOL(rmnet_get_endpoint);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,
struct netlink_ext_ack *extack)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct net_device *real_dev = priv->real_dev;
struct rmnet_port *port, *slave_port;
int err;
port = rmnet_get_port(real_dev);
/* If there is more than one rmnet dev attached, it's probably being
* used for muxing. Skip the bridging in that case
*/
if (port->nr_rmnet_devs > 1)
return -EINVAL;
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
err = rmnet_register_real_device(slave_dev);
if (err)
return -EBUSY;
slave_port = rmnet_get_port(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
port->rmnet_mode = RMNET_EPMODE_BRIDGE;
port->bridge_ep = slave_dev;
netdev_dbg(slave_dev, "registered with rmnet as slave\n");
return 0;
}
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
struct net_device *real_dev = priv->real_dev;
struct rmnet_port *port, *slave_port;
port = rmnet_get_port(real_dev);
port->rmnet_mode = RMNET_EPMODE_VND;
port->bridge_ep = NULL;
slave_port = rmnet_get_port(slave_dev);
rmnet_unregister_real_device(slave_dev, slave_port);
netdev_dbg(slave_dev, "removed from rmnet as slave\n");
return 0;
}
void *rmnet_get_qmi_pt(void *port)
{
if (port)
return ((struct rmnet_port *)port)->qmi_info;
return NULL;
}
EXPORT_SYMBOL(rmnet_get_qmi_pt);
void *rmnet_get_qos_pt(struct net_device *dev)
{
if (dev)
return rcu_dereference(
((struct rmnet_priv *)netdev_priv(dev))->qos_info);
return NULL;
}
EXPORT_SYMBOL(rmnet_get_qos_pt);
void *rmnet_get_rmnet_port(struct net_device *dev)
{
struct rmnet_priv *priv;
if (dev) {
priv = netdev_priv(dev);
return (void *)rmnet_get_port(priv->real_dev);
}
return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_port);
struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
{
struct rmnet_endpoint *ep;
if (port) {
ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
if (ep)
return ep->egress_dev;
}
return NULL;
}
EXPORT_SYMBOL(rmnet_get_rmnet_dev);
void rmnet_reset_qmi_pt(void *port)
{
if (port)
((struct rmnet_port *)port)->qmi_info = NULL;
}
EXPORT_SYMBOL(rmnet_reset_qmi_pt);
void rmnet_init_qmi_pt(void *port, void *qmi)
{
if (port)
((struct rmnet_port *)port)->qmi_info = qmi;
}
EXPORT_SYMBOL(rmnet_init_qmi_pt);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
{
struct rmnet_priv *priv;
struct rmnet_pcpu_stats *ps;
unsigned int cpu, start;
struct rmnet_endpoint *ep;
unsigned long bkt;
if (!port || !tx || !rx)
return;
*tx = 0;
*rx = 0;
rcu_read_lock();
hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
priv = netdev_priv(ep->egress_dev);
for_each_possible_cpu(cpu) {
ps = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&ps->syncp);
*tx += ps->stats.tx_pkts;
*rx += ps->stats.rx_pkts;
} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
}
}
rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_get_packets);
void rmnet_set_powersave_format(void *port)
{
if (!port)
return;
((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_set_powersave_format);
void rmnet_clear_powersave_format(void *port)
{
if (!port)
return;
((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
}
EXPORT_SYMBOL(rmnet_clear_powersave_format);
void rmnet_enable_all_flows(void *port)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
if (unlikely(!port))
return;
rcu_read_lock();
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
qmi_rmnet_enable_all_flows(ep->egress_dev);
}
rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_enable_all_flows);
bool rmnet_all_flows_enabled(void *port)
{
struct rmnet_endpoint *ep;
unsigned long bkt;
bool ret = true;
if (unlikely(!port))
return true;
rcu_read_lock();
hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
bkt, ep, hlnode) {
if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
ret = false;
goto out;
}
}
out:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);
int rmnet_get_powersave_notif(void *port)
{
if (!port)
return 0;
return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);
/* Startup/Shutdown */
static int __init rmnet_init(void)
{
int rc;
WARN_ON(1);
rc = register_netdevice_notifier(&rmnet_dev_notifier);
if (rc != 0)
return rc;
rc = rtnl_link_register(&rmnet_link_ops);
if (rc != 0) {
unregister_netdevice_notifier(&rmnet_dev_notifier);
return rc;
}
return rc;
}
static void __exit rmnet_exit(void)
{
unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
}
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_LICENSE("GPL v2");
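
As the comment above rmnet_get_port() notes, data-path readers take rcu_read_lock() rather than the rtnl lock. A minimal reader sketch; the function name is hypothetical and RMNET_EPMODE_BRIDGE comes from rmnet_private.h, which this file includes:

/* Sketch only: datapath-side read of the port state under RCU. */
static bool example_is_bridge_mode(struct net_device *real_dev)
{
	struct rmnet_port *port;
	bool bridged = false;

	rcu_read_lock();
	port = rmnet_get_port(real_dev);
	if (port)
		bridged = (port->rmnet_mode == RMNET_EPMODE_BRIDGE);
	rcu_read_unlock();

	return bridged;
}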

core/rmnet_config.h Normal file

@@ -0,0 +1,179 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data configuration engine
*
*/
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 4
struct rmnet_endpoint {
u8 mux_id;
struct net_device *egress_dev;
struct hlist_node hlnode;
};
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
u64 dl_hdr_last_trans_id;
u64 dl_hdr_last_seq;
u64 dl_hdr_last_bytes;
u64 dl_hdr_last_pkts;
u64 dl_hdr_last_flows;
u64 dl_hdr_count;
u64 dl_hdr_total_bytes;
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
};
struct rmnet_egress_agg_params {
u16 agg_size;
u16 agg_count;
u32 agg_time;
};
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
struct rmnet_port {
struct net_device *dev;
u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
void *rmnet_perf;
struct rmnet_egress_agg_params egress_agg_params;
/* Protect aggregation related elements */
spinlock_t agg_lock;
struct sk_buff *agg_skb;
int agg_state;
u8 agg_count;
struct timespec64 agg_time;
struct timespec64 agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
void *qmi_info;
/* dl marker elements */
struct list_head dl_list;
struct rmnet_port_priv_stats stats;
/* Descriptor pool */
spinlock_t desc_pool_lock;
struct rmnet_frag_descriptor_pool *frag_desc_pool;
};
extern struct rtnl_link_ops rmnet_link_ops;
struct rmnet_vnd_stats {
u64 rx_pkts;
u64 rx_bytes;
u64 tx_pkts;
u64 tx_bytes;
u32 tx_drops;
};
struct rmnet_pcpu_stats {
struct rmnet_vnd_stats stats;
struct u64_stats_sync syncp;
};
struct rmnet_coal_close_stats {
u64 non_coal;
u64 ip_miss;
u64 trans_miss;
u64 hw_nl;
u64 hw_pkt;
u64 hw_byte;
u64 hw_time;
u64 hw_evict;
u64 coal;
};
struct rmnet_coal_stats {
u64 coal_rx;
u64 coal_pkts;
u64 coal_hdr_nlo_err;
u64 coal_hdr_pkt_err;
u64 coal_csum_err;
u64 coal_reconstruct;
u64 coal_ip_invalid;
u64 coal_trans_invalid;
struct rmnet_coal_close_stats close;
u64 coal_veid[RMNET_MAX_VEID];
};
struct rmnet_priv_stats {
u64 csum_ok;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
u64 csum_err_invalid_ip_version;
u64 csum_err_invalid_transport;
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
struct rmnet_coal_stats coal;
};
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
struct rmnet_priv_stats stats;
void __rcu *qos_info;
};
enum rmnet_dl_marker_prio {
RMNET_PERF,
RMNET_SHS,
};
enum rmnet_trace_func {
RMNET_MODULE,
NW_STACK_MODULE,
};
enum rmnet_trace_evt {
RMNET_DLVR_SKB,
RMNET_RCV_FROM_PND,
RMNET_TX_UL_PKT,
NW_STACK_DEV_Q_XMIT,
NW_STACK_NAPI_GRO_FLUSH,
NW_STACK_RX,
NW_STACK_TX,
};
int rmnet_is_real_dev_registered(const struct net_device *real_dev);
struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,
struct netlink_ext_ack *extack);
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev);
#endif /* _RMNET_CONFIG_H_ */

core/rmnet_descriptor.c Normal file (1087 lines)
File diff suppressed because it is too large

core/rmnet_descriptor.h Normal file

@@ -0,0 +1,147 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
struct rmnet_frag_descriptor_pool {
struct list_head free_list;
u32 pool_size;
};
struct rmnet_frag_descriptor {
struct list_head list;
struct list_head sub_frags;
skb_frag_t frag;
u8 *hdr_ptr;
struct net_device *dev;
u32 hash;
__be32 tcp_seq;
__be16 ip_id;
u16 data_offset;
u16 gso_size;
u16 gso_segs;
u16 ip_len;
u16 trans_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 csum_valid:1,
hdrs_valid:1,
ip_id_set:1,
tcp_seq_set:1,
flush_shs:1,
reserved:3;
};
/* Descriptor management */
struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len);
int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
int start, u8 *nexthdrp, __be16 *fragp);
/* QMAP command packets */
void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port);
int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
struct rmnet_port *port, u16 pkt_len);
/* Ingress data handlers */
void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list);
void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len);
void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
int rmnet_descriptor_init(struct rmnet_port *port);
void rmnet_descriptor_deinit(struct rmnet_port *port);
static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
return skb_frag_address(&frag_desc->frag);
}
static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (size >= skb_frag_size(&frag_desc->frag)) {
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
frag_desc->frag.bv_offset += size;
skb_frag_size_sub(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (!size) {
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
if (size < skb_frag_size(&frag_desc->frag))
skb_frag_size_set(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
struct page *p, u32 page_offset, u32 len)
{
get_page(p);
__skb_frag_set_page(&frag_desc->frag, p);
skb_frag_size_set(&frag_desc->frag, len);
frag_desc->frag.bv_offset = page_offset;
}
static inline u8
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
#endif /* _RMNET_DESCRIPTOR_H_ */
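
A minimal sketch of using the inline helpers above to read and strip the MAP header from a fragment descriptor; struct rmnet_map_header comes from rmnet_map.h, which this header includes, and the example function name is assumed:

/* Sketch only: error handling reduced for illustration. */
static int example_strip_map_header(struct rmnet_frag_descriptor *frag_desc,
				    struct rmnet_port *port, u16 *pkt_len)
{
	struct rmnet_map_header *maph = rmnet_frag_data_ptr(frag_desc);

	*pkt_len = ntohs(maph->pkt_len);

	/* rmnet_frag_pull() recycles the descriptor and returns NULL if the
	 * pull would consume the entire fragment.
	 */
	if (!rmnet_frag_pull(frag_desc, port, sizeof(*maph)))
		return -EINVAL;

	return 0;
}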

core/rmnet_handlers.c Normal file

@@ -0,0 +1,448 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#include "rmnet_qmi.h"
#include "qmi_rmnet.h"
#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
#define CREATE_TRACE_POINTS
#include "rmnet_trace.h"
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
/* Helper Functions */
void rmnet_set_skb_proto(struct sk_buff *skb)
{
switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
case RMNET_IP_VERSION_4:
skb->protocol = htons(ETH_P_IP);
break;
case RMNET_IP_VERSION_6:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
skb->protocol = htons(ETH_P_MAP);
break;
}
}
EXPORT_SYMBOL(rmnet_set_skb_proto);
/* Shs hook handler */
int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);
/* Shs hook handler for work queue*/
int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
/* Generic handler */
void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
0xDEF, 0xDEF, (void *)skb, NULL);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
rmnet_vnd_rx_fixup(skb->dev, skb->len);
skb->pkt_type = PACKET_HOST;
skb_set_mac_header(skb, 0);
rcu_read_lock();
rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
if (rmnet_shs_stamp) {
rmnet_shs_stamp(skb, port);
rcu_read_unlock();
return;
}
rcu_read_unlock();
netif_receive_skb(skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb);
/* Important: the port must not be used here if it has gone stale */
void
rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
enum rmnet_packet_context ctx)
{
int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
struct rmnet_priv *priv = netdev_priv(skb->dev);
trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
0xDEF, 0xDEF, (void *)skb, NULL);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
rmnet_vnd_rx_fixup(skb->dev, skb->len);
skb->pkt_type = PACKET_HOST;
skb_set_mac_header(skb, 0);
/* Packets coming from the work queue context, i.e. via the packet flush
* timer, must go through the special workqueue path in the SHS driver.
*/
rcu_read_lock();
rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
rcu_dereference(rmnet_shs_skb_entry_wq);
if (rmnet_shs_stamp) {
rmnet_shs_stamp(skb, port);
rcu_read_unlock();
return;
}
rcu_read_unlock();
if (ctx == RMNET_NET_RX_CTX)
netif_receive_skb(skb);
else
gro_cells_receive(&priv->gro_cells, skb);
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);
/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
struct rmnet_port *port)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(head))) {
rmnet_set_skb_proto(skb);
rmnet_deliver_skb(skb, port);
}
}
/* MAP handler */
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct sk_buff_head list;
u16 len, pad;
u8 mux_id;
/* We don't need the spinlock since only we touch this */
__skb_queue_head_init(&list);
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
if (qmap->cd_bit) {
qmi_rmnet_set_dl_msg_active(port);
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
if (!rmnet_map_flow_command(skb, port, false))
return;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto free_skb;
skb->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_map_process_next_hdr_packet(skb, &list, len))
goto free_skb;
} else {
/* We only have the main QMAP header to worry about */
pskb_pull(skb, sizeof(*qmap));
rmnet_set_skb_proto(skb);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
pskb_trim(skb, len);
/* Push the single packet onto the list */
__skb_queue_tail(&list, skb);
}
if (port->data_format & RMNET_INGRESS_FORMAT_PS)
qmi_rmnet_work_maybe_restart(port);
rmnet_deliver_skb_list(&list, port);
return;
free_skb:
kfree_skb(skb);
}
int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_perf_deag_entry);
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct sk_buff *skbn;
int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
struct rmnet_port *port);
if (skb->dev->type == ARPHRD_ETHER) {
if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
kfree_skb(skb);
return;
}
skb_push(skb, ETH_HLEN);
}
if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
if (skb_is_nonlinear(skb)) {
rmnet_frag_ingress_handler(skb, port);
return;
}
}
/* No aggregation. Pass the frame on as is */
if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
__rmnet_map_ingress_handler(skb, port);
return;
}
/* Pass off handling to rmnet_perf module, if present */
rcu_read_lock();
rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
if (rmnet_perf_core_deaggregate) {
rmnet_perf_core_deaggregate(skb, port);
rcu_read_unlock();
return;
}
rcu_read_unlock();
/* Deaggregation and freeing of HW-originated
* buffers is done here
*/
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
__rmnet_map_ingress_handler(skbn, port);
if (skbn == skb)
goto next_skb;
}
consume_skb(skb);
next_skb:
skb = skb_frag;
}
}
static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len, csum_type;
struct rmnet_map_header *map_header;
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
csum_type = 0;
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
required_headroom += additional_header_len;
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
port);
if (!map_header)
return -ENOMEM;
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
int non_linear_skb;
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
skb_is_nonlinear(skb);
if (non_linear_skb) {
if (unlikely(__skb_linearize(skb)))
goto done;
}
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
done:
skb->protocol = htons(ETH_P_MAP);
return 0;
}
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
}
}
/* Ingress / Egress Entry Points */
/* Processes packet as per ingress data format for receiving device. Logical
* endpoint is determined from packet inspection. Packet is then sent to the
* egress device listed in the logical endpoint configuration.
*/
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_port *port;
struct net_device *dev;
if (!skb)
goto done;
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
0xDEF, 0xDEF, 0xDEF, NULL, NULL);
dev = skb->dev;
port = rmnet_get_port(dev);
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
break;
}
done:
return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL(rmnet_rx_handler);
/* Modifies packet as per logical endpoint configuration and egress data format
* for egress device configured in logical endpoint. Packet is then transmitted
* on the egress device.
*/
void rmnet_egress_handler(struct sk_buff *skb)
{
struct net_device *orig_dev;
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
int err;
u32 skb_len;
trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
0xDEF, (void *)skb, NULL);
sk_pacing_shift_update(skb->sk, 8);
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
if (!port)
goto drop;
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM)
goto drop;
else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);
return;
}
rmnet_vnd_tx_fixup(orig_dev, skb_len);
dev_queue_xmit(skb);
return;
drop:
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}
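The rmnet_shs_skb_entry and rmnet_perf_deag_entry pointers exported above are the attach points that companion modules such as rmnet_shs and rmnet_perf use to take over packet delivery. A rough sketch of how such a module could install and remove its handler; the module and function names are hypothetical, and the extern declaration is repeated locally because rmnet_handlers.h does not expose it:
/* Hypothetical companion module attaching to the exported __rcu hook */
#include <linux/module.h>
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"

/* Mirrors the definition in rmnet_handlers.c */
extern int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
                                  struct rmnet_port *port) __rcu;

static int my_shs_handler(struct sk_buff *skb, struct rmnet_port *port)
{
        /* Once installed, rmnet_deliver_skb() hands every downlink skb here
         * instead of calling netif_receive_skb() itself, so the hook owns
         * delivery of the packet.
         */
        netif_receive_skb(skb);
        return 0;
}

static int __init my_shs_init(void)
{
        rcu_assign_pointer(rmnet_shs_skb_entry, my_shs_handler);
        return 0;
}

static void __exit my_shs_exit(void)
{
        rcu_assign_pointer(rmnet_shs_skb_entry, NULL);
        /* Wait for in-flight readers before the handler can go away */
        synchronize_rcu();
}

module_init(my_shs_init);
module_exit(my_shs_exit);
MODULE_LICENSE("GPL v2");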

35
core/rmnet_handlers.h Normal file
View File

@@ -0,0 +1,35 @@
/* Copyright (c) 2013, 2016-2017, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#ifndef _RMNET_HANDLERS_H_
#define _RMNET_HANDLERS_H_
#include "rmnet_config.h"
enum rmnet_packet_context {
RMNET_NET_RX_CTX,
RMNET_WQ_CTX,
};
void rmnet_egress_handler(struct sk_buff *skb);
void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
enum rmnet_packet_context ctx);
void rmnet_set_skb_proto(struct sk_buff *skb);
rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
#endif /* _RMNET_HANDLERS_H_ */

289
core/rmnet_map.h Normal file
View File

@@ -0,0 +1,289 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#include <linux/skbuff.h>
#include "rmnet_config.h"
struct rmnet_map_control_command {
u8 command_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
u32 transaction_id;
union {
struct {
u16 ip_family:2;
u16 reserved:14;
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
u8 data[0];
};
} __aligned(1);
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
RMNET_MAP_COMMAND_FLOW_ENABLE,
RMNET_MAP_COMMAND_FLOW_START = 7,
RMNET_MAP_COMMAND_FLOW_END = 8,
/* These should always be the last 2 elements */
RMNET_MAP_COMMAND_UNKNOWN,
RMNET_MAP_COMMAND_ENUM_LENGTH
};
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
enum rmnet_map_v5_close_type {
RMNET_MAP_COAL_CLOSE_NON_COAL,
RMNET_MAP_COAL_CLOSE_IP_MISS,
RMNET_MAP_COAL_CLOSE_TRANS_MISS,
RMNET_MAP_COAL_CLOSE_HW,
RMNET_MAP_COAL_CLOSE_COAL,
};
enum rmnet_map_v5_close_value {
RMNET_MAP_COAL_CLOSE_HW_NL,
RMNET_MAP_COAL_CLOSE_HW_PKT,
RMNET_MAP_COAL_CLOSE_HW_BYTE,
RMNET_MAP_COAL_CLOSE_HW_TIME,
RMNET_MAP_COAL_CLOSE_HW_EVICT,
};
/* Main QMAP header */
struct rmnet_map_header {
u8 pad_len:6;
u8 next_hdr:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);
struct rmnet_map_v5_nl_pair {
__be16 pkt_len;
u8 csum_error_bitmap;
u8 num_packets;
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)
struct rmnet_map_v5_coal_header {
u8 next_hdr:1;
u8 header_type:7;
u8 reserved1:4;
u8 num_nlos:3;
u8 csum_valid:1;
u8 close_type:4;
u8 close_value:4;
u8 reserved2:4;
u8 virtual_channel_id:4;
struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
u8 valid:1;
u8 reserved2:7;
u16 csum_start_offset;
u16 csum_length;
__be16 csum_value;
} __aligned(1);
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ip4_ind:1;
u16 csum_enabled:1;
} __aligned(1);
struct rmnet_map_control_command_header {
u8 command_name;
u8 cmd_type:2;
u8 reserved:5;
u8 e:1;
u16 source_id:15;
u16 ext:1;
u32 transaction_id;
} __aligned(1);
struct rmnet_map_flow_info_le {
__be32 mux_id;
__be32 flow_id;
__be32 bytes;
__be32 pkts;
} __aligned(1);
struct rmnet_map_flow_info_be {
u32 mux_id;
u32 flow_id;
u32 bytes;
u32 pkts;
} __aligned(1);
struct rmnet_map_dl_ind_hdr {
union {
struct {
u32 seq;
u32 bytes;
u32 pkts;
u32 flows;
struct rmnet_map_flow_info_le flow[0];
} le __aligned(1);
struct {
__be32 seq;
__be32 bytes;
__be32 pkts;
__be32 flows;
struct rmnet_map_flow_info_be flow[0];
} be __aligned(1);
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind_trl {
union {
__be32 seq_be;
u32 seq_le;
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind {
u8 priority;
union {
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
union {
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
struct list_head list;
};
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
(Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
(Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
((Y)->data + \
sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
(Y)->data)->pkt_len))
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
{
/* Nonlinear packets we receive are entirely within frag 0 */
if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
return skb_frag_address(skb_shinfo(skb)->frags);
return skb->data;
}
static inline struct rmnet_map_control_command *
rmnet_map_get_cmd_start(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return (struct rmnet_map_control_command *)data;
}
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dl_hdr);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dl_hdr,
struct rmnet_map_control_command_header *qcmd);
void rmnet_map_dl_trl_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_trl *dltrl);
void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
struct rmnet_map_dl_ind_trl *dltrl,
struct rmnet_map_control_command_header *qcmd);
int rmnet_map_flow_command(struct sk_buff *skb,
struct rmnet_port *port,
bool rmnet_perf);
void rmnet_map_cmd_init(struct rmnet_port *port);
int rmnet_map_dl_ind_register(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind);
void rmnet_map_cmd_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */
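The bit-packed structures above define the MAP wire format. As a worked example, the main header is decoded the same way the ingress handler does it: the payload length is the big-endian packet length minus the trailing pad bytes. A short sketch, assuming a linear skb that starts at the MAP header:
/* Illustrative decode of the main QMAP header, mirroring the ingress checks */
static inline int rmnet_map_example_decode(struct sk_buff *skb)
{
        struct rmnet_map_header *qmap;
        u16 payload_len;

        if (!pskb_may_pull(skb, sizeof(*qmap)))
                return -EINVAL;

        qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
        if (qmap->cd_bit)
                return 1;       /* command frame, not data */

        /* pkt_len covers the payload plus pad bytes; strip the padding */
        payload_len = ntohs(qmap->pkt_len) - qmap->pad_len;
        pr_debug("mux_id %u next_hdr %u payload %u bytes\n",
                 qmap->mux_id, qmap->next_hdr, payload_len);
        return 0;
}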

366
core/rmnet_map_command.c Normal file
View File

@@ -0,0 +1,366 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_header *qmap;
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
mux_id = qmap->mux_id;
cmd = rmnet_map_get_cmd_start(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
ep = rmnet_get_endpoint(port, mux_id);
if (!ep) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the IP family and pass the sequence number for both the v4 and
* v6 sequence. User space does not support creating dedicated flows for
* the two protocols.
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r) {
kfree_skb(skb);
return RMNET_MAP_COMMAND_UNSUPPORTED;
} else {
return RMNET_MAP_COMMAND_ACK;
}
}
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = skb->dev;
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pskb_trim(skb,
skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
void
rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dlhdr,
struct rmnet_map_control_command_header *qcmd)
{
struct rmnet_map_dl_ind *tmp;
list_for_each_entry(tmp, &port->dl_list, list)
tmp->dl_hdr_handler_v2(dlhdr, qcmd);
}
void
rmnet_map_dl_hdr_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dlhdr)
{
struct rmnet_map_dl_ind *tmp;
list_for_each_entry(tmp, &port->dl_list, list)
tmp->dl_hdr_handler(dlhdr);
}
void
rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
struct rmnet_map_dl_ind_trl *dltrl,
struct rmnet_map_control_command_header *qcmd)
{
struct rmnet_map_dl_ind *tmp;
list_for_each_entry(tmp, &port->dl_list, list)
tmp->dl_trl_handler_v2(dltrl, qcmd);
}
void
rmnet_map_dl_trl_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_trl *dltrl)
{
struct rmnet_map_dl_ind *tmp;
list_for_each_entry(tmp, &port->dl_list, list)
tmp->dl_trl_handler(dltrl);
}
static void rmnet_map_process_flow_start(struct sk_buff *skb,
struct rmnet_port *port,
bool rmnet_perf)
{
struct rmnet_map_dl_ind_hdr *dlhdr;
struct rmnet_map_control_command_header *qcmd;
u32 data_format;
bool is_dl_mark_v2;
if (skb->len < RMNET_DL_IND_HDR_SIZE)
return;
data_format = port->data_format;
is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
if (is_dl_mark_v2) {
pskb_pull(skb, sizeof(struct rmnet_map_header));
qcmd = (struct rmnet_map_control_command_header *)
rmnet_map_data_ptr(skb);
port->stats.dl_hdr_last_ep_id = qcmd->source_id;
port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
} else {
pskb_pull(skb, RMNET_MAP_CMD_SIZE);
}
dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);
port->stats.dl_hdr_last_seq = dlhdr->le.seq;
port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
port->stats.dl_hdr_last_flows = dlhdr->le.flows;
port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
port->stats.dl_hdr_count++;
if (is_dl_mark_v2)
rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);
else
rmnet_map_dl_hdr_notify(port, dlhdr);
if (rmnet_perf) {
unsigned int pull_size;
pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
pskb_pull(skb, pull_size);
}
}
static void rmnet_map_process_flow_end(struct sk_buff *skb,
struct rmnet_port *port,
bool rmnet_perf)
{
struct rmnet_map_dl_ind_trl *dltrl;
struct rmnet_map_control_command_header *qcmd;
u32 data_format;
bool is_dl_mark_v2;
if (skb->len < RMNET_DL_IND_TRL_SIZE)
return;
data_format = port->data_format;
is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
if (is_dl_mark_v2) {
pskb_pull(skb, sizeof(struct rmnet_map_header));
qcmd = (struct rmnet_map_control_command_header *)
rmnet_map_data_ptr(skb);
pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
} else {
pskb_pull(skb, RMNET_MAP_CMD_SIZE);
}
dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);
port->stats.dl_trl_last_seq = dltrl->seq_le;
port->stats.dl_trl_count++;
if (is_dl_mark_v2)
rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);
else
rmnet_map_dl_trl_notify(port, dltrl);
if (rmnet_perf) {
unsigned int pull_size;
pull_size = sizeof(struct rmnet_map_dl_ind_trl);
if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
pskb_pull(skb, pull_size);
}
}
/* Process a MAP command frame and send an N/ACK message as appropriate. The
* command name is decoded here and the appropriate handler is called.
*/
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = rmnet_map_get_cmd_start(skb);
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_map_do_flow_control(skb, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_map_do_flow_control(skb, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
kfree_skb(skb);
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc, port);
}
int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
bool rmnet_perf)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
cmd = rmnet_map_get_cmd_start(skb);
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_START:
rmnet_map_process_flow_start(skb, port, rmnet_perf);
break;
case RMNET_MAP_COMMAND_FLOW_END:
rmnet_map_process_flow_end(skb, port, rmnet_perf);
break;
default:
return 1;
}
/* The rmnet_perf module will consume the skb */
if (!rmnet_perf)
consume_skb(skb);
return 0;
}
EXPORT_SYMBOL(rmnet_map_flow_command);
void rmnet_map_cmd_exit(struct rmnet_port *port)
{
struct rmnet_map_dl_ind *tmp, *idx;
list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
list_del_rcu(&tmp->list);
}
void rmnet_map_cmd_init(struct rmnet_port *port)
{
INIT_LIST_HEAD(&port->dl_list);
}
int rmnet_map_dl_ind_register(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind)
{
struct rmnet_map_dl_ind *dl_ind_iterator;
bool empty_ind_list = true;
if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
!dl_ind->dl_trl_handler)
return -EINVAL;
list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
empty_ind_list = false;
if (dl_ind_iterator->priority < dl_ind->priority) {
if (dl_ind_iterator->list.next) {
if (dl_ind->priority
< list_entry_rcu(dl_ind_iterator->list.next,
typeof(*dl_ind_iterator), list)->priority) {
list_add_rcu(&dl_ind->list,
&dl_ind_iterator->list);
break;
}
} else {
list_add_rcu(&dl_ind->list,
&dl_ind_iterator->list);
break;
}
} else {
list_add_tail_rcu(&dl_ind->list,
&dl_ind_iterator->list);
break;
}
}
if (empty_ind_list)
list_add_rcu(&dl_ind->list, &port->dl_list);
return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_register);
int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind)
{
struct rmnet_map_dl_ind *tmp;
if (!port || !dl_ind)
return -EINVAL;
list_for_each_entry(tmp, &port->dl_list, list) {
if (tmp == dl_ind) {
list_del_rcu(&dl_ind->list);
goto done;
}
}
done:
return 0;
}
EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);
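rmnet_map_dl_ind_register() keeps the downlink marker listeners in a priority-ordered RCU list, with lower priority values notified first. A listener would register roughly as below; the callback and structure names are hypothetical, and the v1 handler fields are used since those are what the register routine validates:
/* Hypothetical DL marker listener, e.g. from a companion module */
static void my_dl_hdr_cb(struct rmnet_map_dl_ind_hdr *dlhdr)
{
        pr_debug("DL burst start: %u packets, %u bytes expected\n",
                 dlhdr->le.pkts, dlhdr->le.bytes);
}

static void my_dl_trl_cb(struct rmnet_map_dl_ind_trl *dltrl)
{
        pr_debug("DL burst %u complete\n", dltrl->seq_le);
}

static struct rmnet_map_dl_ind my_dl_ind = {
        .priority = 1,
        .dl_hdr_handler = my_dl_hdr_cb,
        .dl_trl_handler = my_dl_trl_cb,
};

/* Called with a valid port, e.g. from a companion module's setup path */
static int my_dl_register(struct rmnet_port *port)
{
        return rmnet_map_dl_ind_register(port, &my_dl_ind);
}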

1245
core/rmnet_map_data.c Normal file

File diff suppressed because it is too large

42
core/rmnet_private.h Normal file
View File

@@ -0,0 +1,42 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_PRIVATE_H_
#define _RMNET_PRIVATE_H_
#define RMNET_MAX_PACKET_SIZE 16384
#define RMNET_DFLT_PACKET_SIZE 1500
#define RMNET_NEEDED_HEADROOM 16
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29)
#define RMNET_FLAGS_INGRESS_COALESCE BIT(4)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 BIT(5)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 BIT(6)
#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
RMNET_INGRESS_FORMAT_DL_MARKER_V2)
/* Power save feature */
#define RMNET_INGRESS_FORMAT_PS BIT(27)
#define RMNET_FORMAT_PS_NOTIF BIT(26)
/* Replace skb->dev with a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
#define RMNET_EPMODE_BRIDGE (2)
#endif /* _RMNET_PRIVATE_H_ */
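These private bits share the same u32 data_format word as the uapi RMNET_FLAGS_* values, which is why they are packed downward from bit 31. A brief sketch of how the data path tests them, assuming the RMNET_FLAGS_INGRESS_MAP_CKSUMV4 definition from the uapi if_link.h header:
/* Illustrative flag tests on a configured data_format word */
static inline bool rmnet_example_wants_dl_marker(u32 data_format)
{
        /* True for either DL marker version */
        return !!(data_format & RMNET_INGRESS_FORMAT_DL_MARKER);
}

static inline bool rmnet_example_csum_offload(u32 data_format)
{
        /* v4 trailer-based or v5 header-based checksum offload */
        return !!(data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV4 |
                                 RMNET_FLAGS_INGRESS_MAP_CKSUMV5));
}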

34
core/rmnet_qmi.h Normal file
View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_QMI_H
#define _RMNET_QMI_H
#include <linux/netdevice.h>
#include <linux/skbuff.h>
void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
void *rmnet_get_qmi_pt(void *port);
void *rmnet_get_qos_pt(struct net_device *dev);
void *rmnet_get_rmnet_port(struct net_device *dev);
struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id);
void rmnet_reset_qmi_pt(void *port);
void rmnet_init_qmi_pt(void *port, void *qmi);
void rmnet_enable_all_flows(void *port);
bool rmnet_all_flows_enabled(void *port);
void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
#endif /*_RMNET_QMI_H*/

253
core/rmnet_trace.h Normal file
View File

@@ -0,0 +1,253 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rmnet
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#define TRACE_INCLUDE_FILE rmnet_trace
#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RMNET_H
#include <linux/skbuff.h>
#include <linux/tracepoint.h>
/*****************************************************************************/
/* Trace events for rmnet module */
/*****************************************************************************/
TRACE_EVENT(rmnet_xmit_skb,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb),
TP_STRUCT__entry(
__string(dev_name, skb->dev->name)
__field(unsigned int, len)
),
TP_fast_assign(
__assign_str(dev_name, skb->dev->name);
__entry->len = skb->len;
),
TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len)
);
DECLARE_EVENT_CLASS
(rmnet_mod_template,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
TP_STRUCT__entry(__field(u8, func)
__field(u8, evt)
__field(u32, uint1)
__field(u32, uint2)
__field(u64, ulong1)
__field(u64, ulong2)
__field(void *, ptr1)
__field(void *, ptr2)
),
TP_fast_assign(__entry->func = func;
__entry->evt = evt;
__entry->uint1 = uint1;
__entry->uint2 = uint2;
__entry->ulong1 = ulong1;
__entry->ulong2 = ulong2;
__entry->ptr1 = ptr1;
__entry->ptr2 = ptr2;
),
TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
__entry->func, __entry->evt,
__entry->uint1, __entry->uint2,
__entry->ulong1, __entry->ulong2,
__entry->ptr1, __entry->ptr2)
)
DEFINE_EVENT
(rmnet_mod_template, rmnet_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
/*****************************************************************************/
/* Trace events for rmnet_perf module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_perf_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
/*****************************************************************************/
/* Trace events for rmnet_shs module */
/*****************************************************************************/
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_low,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_high,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DEFINE_EVENT
(rmnet_mod_template, rmnet_shs_wq_err,
TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
);
DECLARE_EVENT_CLASS
(rmnet_freq_template,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq),
TP_STRUCT__entry(__field(u8, core)
__field(u32, newfreq)
),
TP_fast_assign(__entry->core = core;
__entry->newfreq = newfreq;
),
TP_printk("freq policy core:%u freq floor :%u",
__entry->core, __entry->newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_boost,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
DEFINE_EVENT
(rmnet_freq_template, rmnet_freq_reset,
TP_PROTO(u8 core, u32 newfreq),
TP_ARGS(core, newfreq)
);
TRACE_EVENT
(rmnet_freq_update,
TP_PROTO(u8 core, u32 lowfreq, u32 highfreq),
TP_ARGS(core, lowfreq, highfreq),
TP_STRUCT__entry(__field(u8, core)
__field(u32, lowfreq)
__field(u32, highfreq)
),
TP_fast_assign(__entry->core = core;
__entry->lowfreq = lowfreq;
__entry->highfreq = highfreq;
),
TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u",
__entry->core, __entry->lowfreq, __entry->highfreq)
);
#endif /* _TRACE_RMNET_H */
#include <trace/define_trace.h>
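Because rmnet_handlers.c exports these tracepoints with EXPORT_TRACEPOINT_SYMBOL(), other modules can attach in-kernel probes to them rather than only consuming the events through tracefs. A sketch of probing rmnet_freq_update, assuming the probing module includes this header without defining CREATE_TRACE_POINTS; the probe names are hypothetical:
/* A tracepoint probe receives the registration data pointer first, then the
 * TP_PROTO arguments.
 */
static void my_freq_probe(void *data, u8 core, u32 lowfreq, u32 highfreq)
{
        pr_debug("core %u freq floor %u ceil %u\n", core, lowfreq, highfreq);
}

static int my_probe_attach(void)
{
        return register_trace_rmnet_freq_update(my_freq_probe, NULL);
}

static void my_probe_detach(void)
{
        unregister_trace_rmnet_freq_update(my_freq_probe, NULL);
        /* Ensure no probe is still executing before unloading */
        tracepoint_synchronize_unregister();
}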

399
core/rmnet_vnd.c Normal file
View File

@@ -0,0 +1,399 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* RMNET Data virtual network driver
*
*/
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
#include "qmi_rmnet.h"
#include "rmnet_qmi.h"
#include "rmnet_trace.h"
/* RX/TX Fixup */
void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.rx_pkts++;
pcpu_ptr->stats.rx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.tx_pkts++;
pcpu_ptr->stats.tx_bytes += skb_len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
/* Network Device Operations */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct rmnet_priv *priv;
int ip_type;
u32 mark;
unsigned int len;
priv = netdev_priv(dev);
if (priv->real_dev) {
ip_type = (ip_hdr(skb)->version == 4) ?
AF_INET : AF_INET6;
mark = skb->mark;
len = skb->len;
trace_rmnet_xmit_skb(skb);
rmnet_egress_handler(skb);
qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));
} else {
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}
return NETDEV_TX_OK;
}
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
return -EINVAL;
rmnet_dev->mtu = new_mtu;
return 0;
}
static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
return priv->real_dev->ifindex;
}
static int rmnet_vnd_init(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
int err;
priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
if (!priv->pcpu_stats)
return -ENOMEM;
err = gro_cells_init(&priv->gro_cells, dev);
if (err) {
free_percpu(priv->pcpu_stats);
return err;
}
return 0;
}
static void rmnet_vnd_uninit(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
void *qos;
gro_cells_destroy(&priv->gro_cells);
free_percpu(priv->pcpu_stats);
qos = priv->qos_info;
RCU_INIT_POINTER(priv->qos_info, NULL);
synchronize_rcu();
qmi_rmnet_qos_exit(dev, qos);
}
static void rmnet_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_vnd_stats total_stats;
struct rmnet_pcpu_stats *pcpu_ptr;
unsigned int cpu, start;
memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
for_each_possible_cpu(cpu) {
pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
}
s->rx_packets = total_stats.rx_pkts;
s->rx_bytes = total_stats.rx_bytes;
s->tx_packets = total_stats.tx_pkts;
s->tx_bytes = total_stats.tx_bytes;
s->tx_dropped = total_stats.tx_drops;
}
static u16 rmnet_vnd_select_queue(struct net_device *dev,
struct sk_buff *skb,
struct net_device *sb_dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
int txq = 0;
if (priv->real_dev)
txq = qmi_rmnet_get_queue(dev, skb);
return (txq < dev->real_num_tx_queues) ? txq : 0;
}
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
.ndo_get_iflink = rmnet_vnd_get_iflink,
.ndo_add_slave = rmnet_add_bridge,
.ndo_del_slave = rmnet_del_bridge,
.ndo_init = rmnet_vnd_init,
.ndo_uninit = rmnet_vnd_uninit,
.ndo_get_stats64 = rmnet_get_stats64,
.ndo_select_queue = rmnet_vnd_select_queue,
};
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum ok",
"Checksum valid bit not set",
"Checksum validation failed",
"Checksum error bad buffer",
"Checksum error bad ip version",
"Checksum error bad transport",
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
"Checksum computed in hardware",
"Coalescing packets received",
"Coalesced packets",
"Coalescing header NLO errors",
"Coalescing header pcount errors",
"Coalescing checksum errors",
"Coalescing packet reconstructs",
"Coalescing IP version invalid",
"Coalescing L4 header invalid",
"Coalescing close Non-coalescable",
"Coalescing close L3 mismatch",
"Coalescing close L4 mismatch",
"Coalescing close HW NLO limit",
"Coalescing close HW packet limit",
"Coalescing close HW byte limit",
"Coalescing close HW time limit",
"Coalescing close HW eviction",
"Coalescing close Coalescable",
"Coalescing packets over VEID0",
"Coalescing packets over VEID1",
"Coalescing packets over VEID2",
"Coalescing packets over VEID3",
};
static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"MAP Cmd last version",
"MAP Cmd last ep id",
"MAP Cmd last transaction id",
"DL header last seen sequence",
"DL header last seen bytes",
"DL header last seen packets",
"DL header last seen flows",
"DL header pkts received",
"DL header total bytes received",
"DL header total pkts received",
"DL trailer last seen sequence",
"DL trailer pkts received",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &rmnet_gstrings_stats,
sizeof(rmnet_gstrings_stats));
memcpy(buf + sizeof(rmnet_gstrings_stats),
&rmnet_port_gstrings_stats,
sizeof(rmnet_port_gstrings_stats));
break;
}
}
static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(rmnet_gstrings_stats) +
ARRAY_SIZE(rmnet_port_gstrings_stats);
default:
return -EOPNOTSUPP;
}
}
static void rmnet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_priv_stats *st = &priv->stats;
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!data || !port)
return;
stp = &port->stats;
memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp,
ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
}
static int rmnet_stats_reset(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_port_priv_stats *stp;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
if (!port)
return -EINVAL;
stp = &port->stats;
memset(stp, 0, sizeof(*stp));
return 0;
}
static const struct ethtool_ops rmnet_ethtool_ops = {
.get_ethtool_stats = rmnet_get_ethtool_stats,
.get_strings = rmnet_get_strings,
.get_sset_count = rmnet_get_sset_count,
.nway_reset = rmnet_stats_reset,
};
/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
* flags, ARP type, needed headroom, etc.
*/
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
random_ether_addr(rmnet_dev->dev_addr);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
rmnet_dev->header_ops = NULL; /* No header */
rmnet_dev->type = ARPHRD_RAWIP;
rmnet_dev->hard_header_len = 0;
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
}
/* Exposed API */
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep)
{
struct rmnet_priv *priv = netdev_priv(rmnet_dev);
int rc;
if (ep->egress_dev)
return -EINVAL;
if (rmnet_get_endpoint(port, id))
return -EBUSY;
rmnet_dev->hw_features = NETIF_F_RXCSUM;
rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
rmnet_dev->hw_features |= NETIF_F_SG;
rmnet_dev->hw_features |= NETIF_F_GRO_HW;
priv->real_dev = real_dev;
rc = register_netdevice(rmnet_dev);
if (!rc) {
ep->egress_dev = rmnet_dev;
ep->mux_id = id;
port->nr_rmnet_devs++;
rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
priv->mux_id = id;
priv->qos_info = qmi_rmnet_qos_init(real_dev, id);
netdev_dbg(rmnet_dev, "rmnet dev created\n");
}
return rc;
}
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep)
{
if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
return -EINVAL;
ep->egress_dev = NULL;
port->nr_rmnet_devs--;
return 0;
}
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
struct rmnet_priv *priv;
priv = netdev_priv(rmnet_dev);
return priv->mux_id;
}
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
/* Although we expect a similar number of enable and disable
* commands, optimize for the disable path, which is more
* latency-sensitive than enable.
*/
if (unlikely(enable))
netif_wake_queue(rmnet_dev);
else
netif_stop_queue(rmnet_dev);
return 0;
}
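rmnet_get_ethtool_stats() above copies struct rmnet_priv_stats and struct rmnet_port_priv_stats directly into the u64 array, so the two string tables must stay in lock-step with those structures (presumably defined in rmnet_config.h). One possible compile-time guard, sketched here under the assumption that every counter in those structs is a u64:
/* Possible layout guard; could be invoked once from rmnet_get_ethtool_stats().
 * The memcpy()s above only require the structs to be at least as large as the
 * corresponding string tables.
 */
static void rmnet_vnd_stats_layout_check(void)
{
        BUILD_BUG_ON(sizeof(struct rmnet_priv_stats) <
                     ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
        BUILD_BUG_ON(sizeof(struct rmnet_port_priv_stats) <
                     ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
}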

30
core/rmnet_vnd.h Normal file
View File

@@ -0,0 +1,30 @@
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data Virtual Network Device APIs
*
*/
#ifndef _RMNET_VND_H_
#define _RMNET_VND_H_
int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
struct rmnet_port *port,
struct net_device *real_dev,
struct rmnet_endpoint *ep);
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */

77
core/wda.h Normal file
View File

@@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM wda
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
#define TRACE_INCLUDE_FILE wda
#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WDA_H
#include <linux/tracepoint.h>
TRACE_EVENT(wda_set_powersave_mode,
TP_PROTO(int enable),
TP_ARGS(enable),
TP_STRUCT__entry(
__field(int, enable)
),
TP_fast_assign(
__entry->enable = enable;
),
TP_printk("set powersave mode to %s",
__entry->enable ? "enable" : "disable")
);
TRACE_EVENT(wda_client_state_up,
TP_PROTO(u32 instance, u32 ep_type, u32 iface),
TP_ARGS(instance, ep_type, iface),
TP_STRUCT__entry(
__field(u32, instance)
__field(u32, ep_type)
__field(u32, iface)
),
TP_fast_assign(
__entry->instance = instance;
__entry->ep_type = ep_type;
__entry->iface = iface;
),
TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
__entry->instance, __entry->ep_type, __entry->iface)
);
TRACE_EVENT(wda_client_state_down,
TP_PROTO(int from_cb),
TP_ARGS(from_cb),
TP_STRUCT__entry(
__field(int, from_cb)
),
TP_fast_assign(
__entry->from_cb = from_cb;
),
TP_printk("Client: Connection with WDA lost Exit by callback %d",
__entry->from_cb)
);
#endif /* _TRACE_WDA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

458
core/wda_qmi.c Normal file
View File

@@ -0,0 +1,458 @@
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/rtnetlink.h>
#include <linux/soc/qcom/qmi.h>
#include "rmnet_qmi.h"
#define CREATE_TRACE_POINTS
#include "wda.h"
#undef CREATE_TRACE_POINTS
#include "qmi_rmnet_i.h"
struct wda_qmi_data {
void *rmnet_port;
struct workqueue_struct *wda_wq;
struct work_struct svc_arrive;
struct qmi_handle handle;
struct sockaddr_qrtr ssctl;
struct svc_info svc;
int restart_state;
};
static void wda_svc_config(struct work_struct *work);
/* **************************************************** */
#define WDA_SERVICE_ID_V01 0x1A
#define WDA_SERVICE_VERS_V01 0x01
#define WDA_TIMEOUT_JF msecs_to_jiffies(1000)
#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14
#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 4
#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7
enum wda_powersave_config_mask_enum_v01 {
WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
};
struct wda_set_powersave_config_req_msg_v01 {
/* Mandatory */
struct data_ep_id_type_v01 ep_id;
/* Optional */
uint8_t req_data_cfg_valid;
enum wda_powersave_config_mask_enum_v01 req_data_cfg;
};
struct wda_set_powersave_config_resp_msg_v01 {
/* Mandatory */
struct qmi_response_type_v01 resp;
/* Optional */
uint8_t data_cfg_valid;
enum wda_powersave_config_mask_enum_v01 data_cfg;
};
struct wda_set_powersave_mode_req_msg_v01 {
/* Mandatory */
uint8_t powersave_control_flag;
};
struct wda_set_powersave_mode_resp_msg_v01 {
/* Mandatory */
struct qmi_response_type_v01 resp;
};
static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct data_ep_id_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct
wda_set_powersave_config_req_msg_v01,
ep_id),
.ei_array = data_ep_id_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct
wda_set_powersave_config_req_msg_v01,
req_data_cfg_valid),
.ei_array = NULL,
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum
wda_powersave_config_mask_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct
wda_set_powersave_config_req_msg_v01,
req_data_cfg),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct
wda_set_powersave_config_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct
wda_set_powersave_config_resp_msg_v01,
data_cfg_valid),
.ei_array = NULL,
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum
wda_powersave_config_mask_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct
wda_set_powersave_config_resp_msg_v01,
data_cfg),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct
wda_set_powersave_mode_req_msg_v01,
powersave_control_flag),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct
wda_set_powersave_mode_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
{
struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
struct wda_set_powersave_mode_resp_msg_v01 *resp;
struct wda_set_powersave_mode_req_msg_v01 *req;
struct qmi_txn txn;
int ret;
if (!data || !data->rmnet_port)
return -EINVAL;
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
if (!resp) {
kfree(req);
return -ENOMEM;
}
ret = qmi_txn_init(&data->handle, &txn,
wda_set_powersave_mode_resp_msg_v01_ei, resp);
if (ret < 0) {
pr_err("%s() Failed init for response, err: %d\n",
__func__, ret);
goto out;
}
req->powersave_control_flag = enable;
ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
wda_set_powersave_mode_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
pr_err("%s() Failed sending request, err: %d\n",
__func__, ret);
goto out;
}
ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("%s() Request rejected, result: %d, err: %d\n",
__func__, resp->resp.result, resp->resp.error);
ret = -resp->resp.result;
}
out:
kfree(resp);
kfree(req);
return ret;
}
static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
{
struct wda_qmi_data *data = container_of(wda_handle,
struct wda_qmi_data, handle);
struct wda_set_powersave_config_resp_msg_v01 *resp;
struct wda_set_powersave_config_req_msg_v01 *req;
struct qmi_txn txn;
int ret;
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
if (!resp) {
kfree(req);
return -ENOMEM;
}
ret = qmi_txn_init(wda_handle, &txn,
wda_set_powersave_config_resp_msg_v01_ei, resp);
if (ret < 0) {
pr_err("%s() Failed init for response, err: %d\n",
__func__, ret);
goto out;
}
req->ep_id.ep_type = data->svc.ep_type;
req->ep_id.iface_id = data->svc.iface_id;
req->req_data_cfg_valid = 1;
req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
wda_set_powersave_config_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
pr_err("%s() Failed sending request, err: %d\n", __func__, ret);
goto out;
}
ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
pr_err("%s() Request rejected, result: %d, error: %d\n",
__func__, resp->resp.result, resp->resp.error);
ret = -resp->resp.result;
}
out:
kfree(resp);
kfree(req);
return ret;
}
static void wda_svc_config(struct work_struct *work)
{
struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
svc_arrive);
struct qmi_info *qmi;
int rc;
if (data->restart_state == 1)
return;
rc = wda_set_powersave_config_req(&data->handle);
if (rc < 0) {
pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
return;
}
if (data->restart_state == 1)
return;
while (!rtnl_trylock()) {
if (!data->restart_state)
cond_resched();
else
return;
}
qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
if (!qmi) {
rtnl_unlock();
return;
}
qmi->wda_pending = NULL;
qmi->wda_client = (void *)data;
trace_wda_client_state_up(data->svc.instance,
data->svc.ep_type,
data->svc.iface_id);
rtnl_unlock();
pr_info("Connection established with the WDA Service\n");
}
static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
{
struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
handle);
data->ssctl.sq_family = AF_QIPCRTR;
data->ssctl.sq_node = svc->node;
data->ssctl.sq_port = svc->port;
queue_work(data->wda_wq, &data->svc_arrive);
return 0;
}
static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
{
struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
handle);
if (!data)
pr_info("%s() data is null\n", __func__);
}
static struct qmi_ops server_ops = {
.new_server = wda_svc_arrive,
.del_server = wda_svc_exit,
};
int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
struct wda_qmi_data *data;
int rc = -ENOMEM;
if (!port || !qmi)
return -EINVAL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->wda_wq = create_singlethread_workqueue("wda_wq");
if (!data->wda_wq) {
pr_err("%s Could not create workqueue\n", __func__);
goto err0;
}
data->rmnet_port = port;
data->restart_state = 0;
memcpy(&data->svc, psvc, sizeof(data->svc));
INIT_WORK(&data->svc_arrive, wda_svc_config);
rc = qmi_handle_init(&data->handle,
QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN,
&server_ops, NULL);
if (rc < 0) {
pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
goto err1;
}
rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
WDA_SERVICE_VERS_V01, psvc->instance);
if (rc < 0) {
pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
goto err2;
}
qmi->wda_pending = (void *)data;
return 0;
err2:
qmi_handle_release(&data->handle);
err1:
destroy_workqueue(data->wda_wq);
err0:
kfree(data);
return rc;
}
void wda_qmi_client_exit(void *wda_data)
{
struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
if (!data) {
pr_info("%s() data is null\n", __func__);
return;
}
data->restart_state = 1;
trace_wda_client_state_down(0);
qmi_handle_release(&data->handle);
destroy_workqueue(data->wda_wq);
kfree(data);
}
int wda_set_powersave_mode(void *wda_data, uint8_t enable)
{
trace_wda_set_powersave_mode(enable);
return wda_set_powersave_mode_req(wda_data, enable);
}
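Seen from qmi_rmnet, the lifecycle of this client is: wda_qmi_client_init() queues the QMI service lookup and parks the client in qmi->wda_pending, wda_svc_config() promotes it to qmi->wda_client once the WDA service answers, and power collapse is then toggled through wda_set_powersave_mode(). A condensed caller-side sketch, with the svc and qmi bookkeeping assumed to be filled in by qmi_rmnet.c and error handling trimmed:
/* Condensed caller-side sketch of the WDA client lifecycle */
static int example_wda_bringup(void *port, struct svc_info *svc,
                               struct qmi_info *qmi)
{
        int rc;

        rc = wda_qmi_client_init(port, svc, qmi);
        if (rc < 0)
                return rc;      /* lookup was not queued */

        /* Later, once the service has arrived and qmi->wda_client is set,
         * request power save mode.
         */
        return wda_set_powersave_mode(qmi->wda_client, 1);
}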

View File

@@ -0,0 +1,8 @@
#Build rmnet core
DATA_DLKM_BOARD_PLATFORMS_LIST := lahaina
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_core.ko
endif
endif