diff --git a/core/Android.mk b/core/Android.mk
new file mode 100644
index 0000000000..3c9aaa72ec
--- /dev/null
+++ b/core/Android.mk
@@ -0,0 +1,26 @@
+ifneq ($(TARGET_PRODUCT),qssi)
+RMNET_CORE_DLKM_PLATFORMS_LIST := lahaina
+
+ifeq ($(call is-board-platform-in-list, $(RMNET_CORE_DLKM_PLATFORMS_LIST)),true)
+#Make file to create RMNET_CORE DLKM
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
+LOCAL_CLANG :=true
+
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+LOCAL_MODULE := rmnet_core.ko
+
+LOCAL_SRC_FILES := rmnet_config.c rmnet_descriptor.c rmnet_handlers.c rmnet_map_command.c rmnet_map_data.c rmnet_vnd.c dfc_qmi.c qmi_rmnet.c wda_qmi.c
+
+RMNET_BLD_DIR := ../../vendor/qcom/opensource/datarmnet/core
+DLKM_DIR := $(TOP)/device/qcom/common/dlkm
+
+KBUILD_OPTIONS := $(RMNET_BLD_DIR)
+
+$(warning $(DLKM_DIR))
+include $(DLKM_DIR)/AndroidKernelModule.mk
+
+endif #End of Check for target
+endif #End of Check for qssi target
diff --git a/core/Kbuild b/core/Kbuild
new file mode 100644
index 0000000000..d63e0ab4f6
--- /dev/null
+++ b/core/Kbuild
@@ -0,0 +1,5 @@
+obj-m += rmnet_core.o
+rmnet_core-y := rmnet_config.o rmnet_handlers.o rmnet_descriptor.o rmnet_map_command.o rmnet_map_data.o rmnet_vnd.o \
+	qmi_rmnet.o wda_qmi.o dfc_qmi.o
+
+
diff --git a/core/Kconfig b/core/Kconfig
new file mode 100644
index 0000000000..b6356c1ee2
--- /dev/null
+++ b/core/Kconfig
@@ -0,0 +1,12 @@
+#
+# RMNET MAP driver
+#
+
+menuconfig RMNET_CORE
+	default m
+	select GRO_CELLS
+	---help---
+	  If you select this, you will enable the RMNET module which is used
+	  for handling data in the multiplexing and aggregation protocol (MAP)
+	  format in the embedded data path. RMNET devices can be attached to
+	  any IP mode physical device.
diff --git a/core/Makefile b/core/Makefile
new file mode 100644
index 0000000000..ed85c84a41
--- /dev/null
+++ b/core/Makefile
@@ -0,0 +1,13 @@
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+KBUILD_OPTIONS := RMNET_CORE_ROOT=$(PWD)
+KBUILD_OPTIONS += MODNAME?=rmnet_core
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean
diff --git a/core/dfc.h b/core/dfc.h
new file mode 100644
index 0000000000..48a20935fb
--- /dev/null
+++ b/core/dfc.h
@@ -0,0 +1,254 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dfc +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core +#define TRACE_INCLUDE_FILE dfc + +#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DFC_H + +#include + +TRACE_EVENT(dfc_qmi_tc, + + TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant, + int qlen, u32 tcm_handle, int enable), + + TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable), + + TP_STRUCT__entry( + __string(dev_name, name) + __field(u8, bid) + __field(u32, fid) + __field(u32, grant) + __field(int, qlen) + __field(u32, tcm_handle) + __field(int, enable) + ), + + TP_fast_assign( + __assign_str(dev_name, name); + __entry->bid = bearer_id; + __entry->fid = flow_id; + __entry->grant = grant; + __entry->qlen = qlen; + __entry->tcm_handle = tcm_handle; + __entry->enable = enable; + ), + + TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s", + __get_str(dev_name), + __entry->bid, __entry->grant, __entry->qlen, __entry->fid, + __entry->tcm_handle, + __entry->enable ? "enable" : "disable") +); + +TRACE_EVENT(dfc_flow_ind, + + TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant, + u16 seq_num, u8 ack_req, u32 ancillary), + + TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req, + ancillary), + + TP_STRUCT__entry( + __field(int, src) + __field(int, idx) + __field(u8, mid) + __field(u8, bid) + __field(u32, grant) + __field(u16, seq) + __field(u8, ack_req) + __field(u32, ancillary) + ), + + TP_fast_assign( + __entry->src = src; + __entry->idx = idx; + __entry->mid = mux_id; + __entry->bid = bearer_id; + __entry->grant = grant; + __entry->seq = seq_num; + __entry->ack_req = ack_req; + __entry->ancillary = ancillary; + ), + + TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u", + __entry->src, __entry->idx, __entry->mid, __entry->bid, + __entry->grant, __entry->seq, __entry->ack_req, + __entry->ancillary) +); + +TRACE_EVENT(dfc_flow_check, + + TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant), + + TP_ARGS(name, bearer_id, len, grant), + + TP_STRUCT__entry( + __string(dev_name, name) + __field(u8, bearer_id) + __field(unsigned int, len) + __field(u32, grant) + ), + + TP_fast_assign( + __assign_str(dev_name, name) + __entry->bearer_id = bearer_id; + __entry->len = len; + __entry->grant = grant; + ), + + TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u", + __get_str(dev_name), + __entry->bearer_id, __entry->len, __entry->grant) +); + +TRACE_EVENT(dfc_flow_info, + + TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type, + u32 handle, int add), + + TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add), + + TP_STRUCT__entry( + __string(dev_name, name) + __field(u8, bid) + __field(u32, fid) + __field(int, ip) + __field(u32, handle) + __field(int, action) + ), + + TP_fast_assign( + __assign_str(dev_name, name) + __entry->bid = bearer_id; + __entry->fid = flow_id; + __entry->ip = ip_type; + __entry->handle = handle; + __entry->action = add; + ), + + TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d", + __entry->action ? 
"add flow" : "delete flow", + __get_str(dev_name), + __entry->bid, __entry->fid, __entry->ip, __entry->handle) +); + +TRACE_EVENT(dfc_client_state_up, + + TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface), + + TP_ARGS(idx, instance, ep_type, iface), + + TP_STRUCT__entry( + __field(int, idx) + __field(u32, instance) + __field(u32, ep_type) + __field(u32, iface) + ), + + TP_fast_assign( + __entry->idx = idx; + __entry->instance = instance; + __entry->ep_type = ep_type; + __entry->iface = iface; + ), + + TP_printk("Client[%d]: Connection established with DFC Service " + "instance=%u ep_type=%u iface_id=%u", + __entry->idx, __entry->instance, + __entry->ep_type, __entry->iface) +); + +TRACE_EVENT(dfc_client_state_down, + + TP_PROTO(int idx, int from_cb), + + TP_ARGS(idx, from_cb), + + TP_STRUCT__entry( + __field(int, idx) + __field(int, from_cb) + ), + + TP_fast_assign( + __entry->idx = idx; + __entry->from_cb = from_cb; + ), + + TP_printk("Client[%d]: Connection with DFC service lost. " + "Exit by callback %d", + __entry->idx, __entry->from_cb) +); + +TRACE_EVENT(dfc_qmap_cmd, + + TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran), + + TP_ARGS(mux_id, bearer_id, seq_num, type, tran), + + TP_STRUCT__entry( + __field(u8, mid) + __field(u8, bid) + __field(u16, seq) + __field(u8, type) + __field(u32, tran) + ), + + TP_fast_assign( + __entry->mid = mux_id; + __entry->bid = bearer_id; + __entry->seq = seq_num; + __entry->type = type; + __entry->tran = tran; + ), + + TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u", + __entry->mid, __entry->bid, __entry->seq, + __entry->type, __entry->tran) +); + +TRACE_EVENT(dfc_tx_link_status_ind, + + TP_PROTO(int src, int idx, u8 status, u8 mux_id, u8 bearer_id), + + TP_ARGS(src, idx, status, mux_id, bearer_id), + + TP_STRUCT__entry( + __field(int, src) + __field(int, idx) + __field(u8, status) + __field(u8, mid) + __field(u8, bid) + ), + + TP_fast_assign( + __entry->src = src; + __entry->idx = idx; + __entry->status = status; + __entry->mid = mux_id; + __entry->bid = bearer_id; + ), + + TP_printk("src=%d [%d]: status=%u mux_id=%u bearer_id=%u", + __entry->src, __entry->idx, __entry->status, + __entry->mid, __entry->bid) +); + +#endif /* _TRACE_DFC_H */ + +/* This part must be outside protection */ +#include diff --git a/core/dfc_defs.h b/core/dfc_defs.h new file mode 100644 index 0000000000..7553707d3a --- /dev/null +++ b/core/dfc_defs.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _DFC_DEFS_H +#define _DFC_DEFS_H + +#include +#include "qmi_rmnet_i.h" + +#define DFC_ACK_TYPE_DISABLE 1 +#define DFC_ACK_TYPE_THRESHOLD 2 + +#define DFC_MASK_TCP_BIDIR 0x1 +#define DFC_MASK_RAT_SWITCH 0x2 +#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR) +#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH) + +#define DFC_MAX_QOS_ID_V01 2 + +struct dfc_qmi_data { + void *rmnet_port; + struct workqueue_struct *dfc_wq; + struct work_struct svc_arrive; + struct qmi_handle handle; + struct sockaddr_qrtr ssctl; + struct svc_info svc; + struct work_struct qmi_ind_work; + struct list_head qmi_ind_q; + spinlock_t qmi_ind_lock; + int index; + int restart_state; +}; + +enum dfc_ip_type_enum_v01 { + DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + DFC_IPV4_TYPE_V01 = 0x4, + DFC_IPV6_TYPE_V01 = 0x6, + DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 +}; + +struct dfc_qos_id_type_v01 { + u32 qos_id; + enum dfc_ip_type_enum_v01 ip_type; +}; + +struct dfc_flow_status_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 num_bytes; + u16 seq_num; + u8 qos_ids_len; + struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01]; +}; + +struct dfc_ancillary_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 reserved; +}; + +struct dfc_flow_status_ind_msg_v01 { + u8 flow_status_valid; + u8 flow_status_len; + struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; + u8 eod_ack_reqd_valid; + u8 eod_ack_reqd; + u8 ancillary_info_valid; + u8 ancillary_info_len; + struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01]; +}; + +struct dfc_bearer_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + enum dfc_ip_type_enum_v01 ip_type; +}; + +struct dfc_tx_link_status_ind_msg_v01 { + u8 tx_status; + u8 bearer_info_valid; + u8 bearer_info_len; + struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01]; +}; + +void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc, + struct dfc_flow_status_ind_msg_v01 *ind); + +void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc, + struct dfc_tx_link_status_ind_msg_v01 *ind); + +#endif /* _DFC_DEFS_H */ diff --git a/core/dfc_qmap.c b/core/dfc_qmap.c new file mode 100644 index 0000000000..a4b2095b9e --- /dev/null +++ b/core/dfc_qmap.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "dfc_defs.h" + +#define QMAP_DFC_VER 1 + +#define QMAP_CMD_DONE -1 + +#define QMAP_CMD_REQUEST 0 +#define QMAP_CMD_ACK 1 +#define QMAP_CMD_UNSUPPORTED 2 +#define QMAP_CMD_INVALID 3 + +#define QMAP_DFC_CONFIG 10 +#define QMAP_DFC_IND 11 +#define QMAP_DFC_QUERY 12 +#define QMAP_DFC_END_MARKER 13 + +struct qmap_hdr { + u8 cd_pad; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +#define QMAP_HDR_LEN sizeof(struct qmap_hdr) + +struct qmap_cmd_hdr { + u8 pad_len:6; + u8 reserved_bit:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; + u8 cmd_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + __be32 tx_id; +} __aligned(1); + +struct qmap_dfc_config { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 cmd_id; + u8 reserved; + u8 tx_info:1; + u8 reserved2:7; + __be32 ep_type; + __be32 iface_id; + u32 reserved3; +} __aligned(1); + +struct qmap_dfc_ind { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 reserved; + __be16 seq_num; + u8 reserved2; + u8 tx_info_valid:1; + u8 tx_info:1; + u8 reserved3:6; + u8 bearer_id; + u8 tcp_bidir:1; + u8 bearer_status:3; + u8 reserved4:4; + __be32 grant; + u32 reserved5; + u32 reserved6; +} __aligned(1); + +struct qmap_dfc_query { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 reserved; + u8 bearer_id; + u8 reserved2; + u32 reserved3; +} __aligned(1); + +struct qmap_dfc_query_resp { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 bearer_id; + u8 tcp_bidir:1; + u8 reserved:7; + u8 invalid:1; + u8 reserved2:7; + __be32 grant; + u32 reserved3; + u32 reserved4; +} __aligned(1); + +struct qmap_dfc_end_marker_req { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 reserved; + u8 bearer_id; + u8 reserved2; + u16 reserved3; + __be16 seq_num; + u32 reserved4; +} __aligned(1); + +struct qmap_dfc_end_marker_cnf { + struct qmap_cmd_hdr hdr; + u8 cmd_ver; + u8 reserved; + u8 bearer_id; + u8 reserved2; + u16 reserved3; + __be16 seq_num; + u32 reserved4; +} __aligned(1); + +static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind; +static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind; +static struct dfc_qmi_data __rcu *qmap_dfc_data; +static atomic_t qmap_txid; +static void *rmnet_ctl_handle; + +static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, + u8 bearer_id, u16 seq, u32 tx_id); + +static void dfc_qmap_send_cmd(struct sk_buff *skb) +{ + trace_dfc_qmap(skb->data, skb->len, false); + + if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) { + pr_err("Failed to send to rmnet ctl\n"); + kfree_skb(skb); + } +} + +static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc, + struct sk_buff *skb) +{ + struct qmap_cmd_hdr *cmd; + + cmd = (struct qmap_cmd_hdr *)skb->data; + + skb->protocol = htons(ETH_P_MAP); + skb->dev = rmnet_get_real_dev(dfc->rmnet_port); + + trace_dfc_qmap(skb->data, skb->len, false); + dev_queue_xmit(skb); +} + +static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc, + struct sk_buff *skb) +{ + struct qmap_dfc_ind *cmd; + + if (skb->len < sizeof(struct qmap_dfc_ind)) + return QMAP_CMD_INVALID; + + cmd = (struct qmap_dfc_ind *)skb->data; + + if (cmd->tx_info_valid) { + memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind)); + qmap_tx_ind.tx_status = cmd->tx_info; + qmap_tx_ind.bearer_info_valid = 1; + qmap_tx_ind.bearer_info_len = 1; + qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id; + qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id; + + dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind); + + /* Ignore grant since it is always 0 */ + goto done; + } + + memset(&qmap_flow_ind, 0, 
sizeof(qmap_flow_ind)); + qmap_flow_ind.flow_status_valid = 1; + qmap_flow_ind.flow_status_len = 1; + qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id; + qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id; + qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant); + qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num); + + if (cmd->tcp_bidir) { + qmap_flow_ind.ancillary_info_valid = 1; + qmap_flow_ind.ancillary_info_len = 1; + qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id; + qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id; + qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR; + } + + dfc_do_burst_flow_control(dfc, &qmap_flow_ind); + +done: + return QMAP_CMD_ACK; +} + +static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc, + struct sk_buff *skb) +{ + struct qmap_dfc_query_resp *cmd; + + if (skb->len < sizeof(struct qmap_dfc_query_resp)) + return QMAP_CMD_DONE; + + cmd = (struct qmap_dfc_query_resp *)skb->data; + + if (cmd->invalid) + return QMAP_CMD_DONE; + + memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind)); + qmap_flow_ind.flow_status_valid = 1; + qmap_flow_ind.flow_status_len = 1; + + qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id; + qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id; + qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant); + qmap_flow_ind.flow_status[0].seq_num = 0xFFFF; + + if (cmd->tcp_bidir) { + qmap_flow_ind.ancillary_info_valid = 1; + qmap_flow_ind.ancillary_info_len = 1; + qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id; + qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id; + qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR; + } + + dfc_do_burst_flow_control(dfc, &qmap_flow_ind); + + return QMAP_CMD_DONE; +} + +static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id, + u8 bearer_id, u16 seq_num, u32 tx_id) +{ + struct net_device *dev; + struct qos_info *qos; + struct rmnet_bearer_map *bearer; + + dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id); + if (!dev) + return; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + return; + + spin_lock_bh(&qos->qos_lock); + + bearer = qmi_rmnet_get_bearer_map(qos, bearer_id); + + if (bearer && bearer->last_seq == seq_num && bearer->grant_size) { + bearer->ack_req = 1; + bearer->ack_txid = tx_id; + } else { + dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id); + } + + spin_unlock_bh(&qos->qos_lock); +} + +static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc, + struct sk_buff *skb) +{ + struct qmap_dfc_end_marker_req *cmd; + + if (skb->len < sizeof(struct qmap_dfc_end_marker_req)) + return QMAP_CMD_INVALID; + + cmd = (struct qmap_dfc_end_marker_req *)skb->data; + + dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id, + ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id)); + + return QMAP_CMD_DONE; +} + +static void dfc_qmap_cmd_handler(struct sk_buff *skb) +{ + struct qmap_cmd_hdr *cmd; + struct dfc_qmi_data *dfc; + int rc = QMAP_CMD_DONE; + + if (!skb) + return; + + trace_dfc_qmap(skb->data, skb->len, true); + + if (skb->len < sizeof(struct qmap_cmd_hdr)) + goto free_skb; + + cmd = (struct qmap_cmd_hdr *)skb->data; + if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN) + goto free_skb; + + if (cmd->cmd_name == QMAP_DFC_QUERY) { + if (cmd->cmd_type != QMAP_CMD_ACK) + goto free_skb; + } else if (cmd->cmd_type != QMAP_CMD_REQUEST) { + goto free_skb; + } + + rcu_read_lock(); + + dfc = rcu_dereference(qmap_dfc_data); + if (!dfc || 
READ_ONCE(dfc->restart_state)) { + rcu_read_unlock(); + goto free_skb; + } + + switch (cmd->cmd_name) { + case QMAP_DFC_IND: + rc = dfc_qmap_handle_ind(dfc, skb); + qmi_rmnet_set_dl_msg_active(dfc->rmnet_port); + break; + + case QMAP_DFC_QUERY: + rc = dfc_qmap_handle_query_resp(dfc, skb); + break; + + case QMAP_DFC_END_MARKER: + rc = dfc_qmap_handle_end_marker_req(dfc, skb); + break; + + default: + rc = QMAP_CMD_UNSUPPORTED; + } + + /* Send ack */ + if (rc != QMAP_CMD_DONE) { + cmd->cmd_type = rc; + if (cmd->cmd_name == QMAP_DFC_IND) + dfc_qmap_send_inband_ack(dfc, skb); + else + dfc_qmap_send_cmd(skb); + + rcu_read_unlock(); + return; + } + + rcu_read_unlock(); + +free_skb: + kfree_skb(skb); +} + +static void dfc_qmap_send_config(struct dfc_qmi_data *data) +{ + struct sk_buff *skb; + struct qmap_dfc_config *dfc_config; + unsigned int len = sizeof(struct qmap_dfc_config); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = htons(ETH_P_MAP); + dfc_config = (struct qmap_dfc_config *)skb_put(skb, len); + memset(dfc_config, 0, len); + + dfc_config->hdr.cd_bit = 1; + dfc_config->hdr.mux_id = 0; + dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN); + dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG; + dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST; + dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid)); + + dfc_config->cmd_ver = QMAP_DFC_VER; + dfc_config->cmd_id = QMAP_DFC_IND; + dfc_config->tx_info = 1; + dfc_config->ep_type = htonl(data->svc.ep_type); + dfc_config->iface_id = htonl(data->svc.iface_id); + + dfc_qmap_send_cmd(skb); +} + +static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id) +{ + struct sk_buff *skb; + struct qmap_dfc_query *dfc_query; + unsigned int len = sizeof(struct qmap_dfc_query); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = htons(ETH_P_MAP); + dfc_query = (struct qmap_dfc_query *)skb_put(skb, len); + memset(dfc_query, 0, len); + + dfc_query->hdr.cd_bit = 1; + dfc_query->hdr.mux_id = mux_id; + dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN); + dfc_query->hdr.cmd_name = QMAP_DFC_QUERY; + dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST; + dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid)); + + dfc_query->cmd_ver = QMAP_DFC_VER; + dfc_query->bearer_id = bearer_id; + + dfc_qmap_send_cmd(skb); +} + +static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos, + u8 bearer_id, u16 seq, u32 tx_id) +{ + struct sk_buff *skb; + struct qmap_dfc_end_marker_cnf *em_cnf; + unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf); + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) + return; + + em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len); + memset(em_cnf, 0, len); + + em_cnf->hdr.cd_bit = 1; + em_cnf->hdr.mux_id = qos->mux_id; + em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN); + em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER; + em_cnf->hdr.cmd_type = QMAP_CMD_ACK; + em_cnf->hdr.tx_id = htonl(tx_id); + + em_cnf->cmd_ver = QMAP_DFC_VER; + em_cnf->bearer_id = bearer_id; + em_cnf->seq_num = htons(seq); + + skb->protocol = htons(ETH_P_MAP); + skb->dev = qos->real_dev; + + /* This cmd needs to be sent in-band */ + trace_dfc_qmap(skb->data, skb->len, false); + rmnet_map_tx_qmap_cmd(skb); +} + +void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type) +{ + struct rmnet_bearer_map *bearer; + + if (type == DFC_ACK_TYPE_DISABLE) { + bearer = qmi_rmnet_get_bearer_map(qos, bearer_id); + if (bearer) + dfc_qmap_send_end_marker_cnf(qos, bearer_id, + seq, bearer->ack_txid); + } else if (type == 
DFC_ACK_TYPE_THRESHOLD) { + dfc_qmap_send_query(qos->mux_id, bearer_id); + } +} + +static struct rmnet_ctl_client_hooks cb = { + .ctl_dl_client_hook = dfc_qmap_cmd_handler, +}; + +int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc, + struct qmi_info *qmi) +{ + struct dfc_qmi_data *data; + + if (!port || !qmi) + return -EINVAL; + + data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->rmnet_port = port; + data->index = index; + memcpy(&data->svc, psvc, sizeof(data->svc)); + + qmi->dfc_clients[index] = (void *)data; + rcu_assign_pointer(qmap_dfc_data, data); + + atomic_set(&qmap_txid, 0); + + rmnet_ctl_handle = rmnet_ctl_register_client(&cb); + if (!rmnet_ctl_handle) + pr_err("Failed to register with rmnet ctl\n"); + + trace_dfc_client_state_up(data->index, data->svc.instance, + data->svc.ep_type, data->svc.iface_id); + + pr_info("DFC QMAP init\n"); + + dfc_qmap_send_config(data); + + return 0; +} + +void dfc_qmap_client_exit(void *dfc_data) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + + if (!data) { + pr_err("%s() data is null\n", __func__); + return; + } + + trace_dfc_client_state_down(data->index, 0); + + rmnet_ctl_unregister_client(rmnet_ctl_handle); + + WRITE_ONCE(data->restart_state, 1); + RCU_INIT_POINTER(qmap_dfc_data, NULL); + synchronize_rcu(); + + kfree(data); + + pr_info("DFC QMAP exit\n"); +} diff --git a/core/dfc_qmi.c b/core/dfc_qmi.c new file mode 100644 index 0000000000..7d3e46b849 --- /dev/null +++ b/core/dfc_qmi.c @@ -0,0 +1,1596 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include "rmnet_qmi.h" +#include "qmi_rmnet.h" + +#include "qmi_rmnet_i.h" +#include "dfc.h" + +#define DFC_MASK_TCP_BIDIR 0x1 +#define DFC_MASK_RAT_SWITCH 0x2 +#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR) +#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH) + +#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6) + +#define DFC_MAX_QOS_ID_V01 2 + +#define DFC_ACK_TYPE_DISABLE 1 +#define DFC_ACK_TYPE_THRESHOLD 2 + +struct dfc_qmap_header { + u8 pad_len:6; + u8 reserved_bit:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +struct dfc_ack_cmd { + struct dfc_qmap_header header; + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + u8 ver:2; + u8 reserved3:6; + u8 type:2; + u8 reserved4:6; + u16 dfc_seq; + u8 reserved5[3]; + u8 bearer_id; +} __aligned(1); + +struct dfc_qmi_data { + void *rmnet_port; + struct workqueue_struct *dfc_wq; + struct work_struct svc_arrive; + struct qmi_handle handle; + struct sockaddr_qrtr ssctl; + struct svc_info svc; + struct work_struct qmi_ind_work; + struct list_head qmi_ind_q; + spinlock_t qmi_ind_lock; + int index; + int restart_state; +}; + +static void dfc_svc_init(struct work_struct *work); + +/* **************************************************** */ +#define DFC_SERVICE_ID_V01 0x4E +#define DFC_SERVICE_VERS_V01 0x01 +#define DFC_TIMEOUT_JF msecs_to_jiffies(1000) + +#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020 +#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020 +#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN 11 +#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7 + +#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001 +#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001 +#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8 +#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7 + +#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022 +#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024 + +#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023 +#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023 +#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20 +#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543 + +struct dfc_bind_client_req_msg_v01 { + u8 ep_id_valid; + struct data_ep_id_type_v01 ep_id; +}; + +struct dfc_bind_client_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct dfc_indication_register_req_msg_v01 { + u8 report_flow_status_valid; + u8 report_flow_status; + u8 report_tx_link_status_valid; + u8 report_tx_link_status; +}; + +struct dfc_indication_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +enum dfc_ip_type_enum_v01 { + DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + DFC_IPV4_TYPE_V01 = 0x4, + DFC_IPV6_TYPE_V01 = 0x6, + DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647 +}; + +struct dfc_qos_id_type_v01 { + u32 qos_id; + enum dfc_ip_type_enum_v01 ip_type; +}; + +struct dfc_flow_status_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 num_bytes; + u16 seq_num; + u8 qos_ids_len; + struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01]; +}; + +static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct dfc_qos_id_type_v01, + qos_id), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum dfc_ip_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 
QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct dfc_qos_id_type_v01, + ip_type), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + subs_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + mux_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + bearer_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + num_bytes), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + seq_num), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + qos_ids_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_QOS_ID_V01, + .elem_size = sizeof(struct dfc_qos_id_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_info_type_v01, + qos_ids), + .ei_array = dfc_qos_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct dfc_ancillary_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + u32 reserved; +}; + +static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + subs_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + mux_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + bearer_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_ancillary_info_type_v01, + reserved), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct dfc_flow_status_ind_msg_v01 { + u8 flow_status_valid; + u8 flow_status_len; + struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; + u8 eod_ack_reqd_valid; + u8 
eod_ack_reqd; + u8 ancillary_info_valid; + u8 ancillary_info_len; + struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01]; +}; + +struct dfc_bearer_info_type_v01 { + u8 subs_id; + u8 mux_id; + u8 bearer_id; + enum dfc_ip_type_enum_v01 ip_type; +}; + +struct dfc_tx_link_status_ind_msg_v01 { + u8 tx_status; + u8 bearer_info_valid; + u8 bearer_info_len; + struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01]; +}; + +struct dfc_get_flow_status_req_msg_v01 { + u8 bearer_id_list_valid; + u8 bearer_id_list_len; + u8 bearer_id_list[DFC_MAX_BEARERS_V01]; +}; + +struct dfc_get_flow_status_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 flow_status_valid; + u8 flow_status_len; + struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01]; +}; + +struct dfc_svc_ind { + struct list_head list; + u16 msg_id; + union { + struct dfc_flow_status_ind_msg_v01 dfc_info; + struct dfc_tx_link_status_ind_msg_v01 tx_status; + } d; +}; + +static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct dfc_bind_client_req_msg_v01, + ep_id_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct data_ep_id_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct dfc_bind_client_req_msg_v01, + ep_id), + .ei_array = data_ep_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct dfc_bind_client_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_flow_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_flow_status), + .ei_array = NULL, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_tx_link_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_indication_register_req_msg_v01, + report_tx_link_status), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + dfc_indication_register_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + 
.data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_flow_status_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + flow_status), + .ei_array = dfc_flow_status_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + eod_ack_reqd_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + eod_ack_reqd), + .ei_array = NULL, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + ancillary_info_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + ancillary_info_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_ancillary_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + dfc_flow_status_ind_msg_v01, + ancillary_info), + .ei_array = dfc_ancillary_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_req_msg_v01, + bearer_id_list_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_req_msg_v01, + bearer_id_list_len), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(u8), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_req_msg_v01, + bearer_id_list), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + dfc_get_flow_status_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = 
sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_resp_msg_v01, + flow_status_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_resp_msg_v01, + flow_status_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_flow_status_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_get_flow_status_resp_msg_v01, + flow_status), + .ei_array = dfc_flow_status_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_bearer_info_type_v01, + subs_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_bearer_info_type_v01, + mux_id), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_bearer_info_type_v01, + bearer_id), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum dfc_ip_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dfc_bearer_info_type_v01, + ip_type), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + dfc_tx_link_status_ind_msg_v01, + tx_status), + .ei_array = NULL, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_tx_link_status_ind_msg_v01, + bearer_info_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_tx_link_status_ind_msg_v01, + bearer_info_len), + .ei_array = NULL, + }, + { + .data_type = QMI_STRUCT, + .elem_len = DFC_MAX_BEARERS_V01, + .elem_size = sizeof(struct + dfc_bearer_info_type_v01), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + dfc_tx_link_status_ind_msg_v01, + bearer_info), + .ei_array = dfc_bearer_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static int +dfc_bind_client_req(struct qmi_handle *dfc_handle, + struct sockaddr_qrtr *ssctl, struct svc_info *svc) +{ + struct dfc_bind_client_resp_msg_v01 *resp; + struct dfc_bind_client_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + ret = 
qmi_txn_init(dfc_handle, &txn, + dfc_bind_client_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init for response, err: %d\n", + __func__, ret); + goto out; + } + + req->ep_id_valid = 1; + req->ep_id.ep_type = svc->ep_type; + req->ep_id.iface_id = svc->iface_id; + ret = qmi_send_request(dfc_handle, ssctl, &txn, + QMI_DFC_BIND_CLIENT_REQ_V01, + QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN, + dfc_bind_client_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("%s() Failed sending request, err: %d\n", + __func__, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF); + if (ret < 0) { + pr_err("%s() Response waiting failed, err: %d\n", + __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() Request rejected, result: %d, err: %d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static int +dfc_indication_register_req(struct qmi_handle *dfc_handle, + struct sockaddr_qrtr *ssctl, u8 reg) +{ + struct dfc_indication_register_resp_msg_v01 *resp; + struct dfc_indication_register_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + ret = qmi_txn_init(dfc_handle, &txn, + dfc_indication_register_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init for response, err: %d\n", + __func__, ret); + goto out; + } + + req->report_flow_status_valid = 1; + req->report_flow_status = reg; + req->report_tx_link_status_valid = 1; + req->report_tx_link_status = reg; + + ret = qmi_send_request(dfc_handle, ssctl, &txn, + QMI_DFC_INDICATION_REGISTER_REQ_V01, + QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN, + dfc_indication_register_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("%s() Failed sending request, err: %d\n", + __func__, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF); + if (ret < 0) { + pr_err("%s() Response waiting failed, err: %d\n", + __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() Request rejected, result: %d, err: %d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static int +dfc_get_flow_status_req(struct qmi_handle *dfc_handle, + struct sockaddr_qrtr *ssctl, + struct dfc_get_flow_status_resp_msg_v01 *resp) +{ + struct dfc_get_flow_status_req_msg_v01 *req; + struct qmi_txn *txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + txn = kzalloc(sizeof(*txn), GFP_ATOMIC); + if (!txn) { + kfree(req); + return -ENOMEM; + } + + ret = qmi_txn_init(dfc_handle, txn, + dfc_get_flow_status_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init for response, err: %d\n", + __func__, ret); + goto out; + } + + ret = qmi_send_request(dfc_handle, ssctl, txn, + QMI_DFC_GET_FLOW_STATUS_REQ_V01, + QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN, + dfc_get_flow_status_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(txn); + pr_err("%s() Failed sending request, err: %d\n", + __func__, ret); + goto out; + } + + ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF); + if (ret < 0) { + pr_err("%s() Response waiting failed, err: %d\n", + __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() Request rejected, result: 
%d, err: %d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(txn); + kfree(req); + return ret; +} + +static int dfc_init_service(struct dfc_qmi_data *data) +{ + int rc; + + rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc); + if (rc < 0) + return rc; + + return dfc_indication_register_req(&data->handle, &data->ssctl, 1); +} + +static void +dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type) +{ + struct qos_info *qos = rmnet_get_qos_pt(dev); + struct sk_buff *skb; + struct dfc_ack_cmd *msg; + int data_size = sizeof(struct dfc_ack_cmd); + int header_size = sizeof(struct dfc_qmap_header); + + if (!qos) + return; + + skb = alloc_skb(data_size, GFP_ATOMIC); + if (!skb) + return; + + msg = (struct dfc_ack_cmd *)skb_put(skb, data_size); + memset(msg, 0, data_size); + + msg->header.cd_bit = 1; + msg->header.mux_id = mux_id; + msg->header.pkt_len = htons(data_size - header_size); + + msg->bearer_id = bearer_id; + msg->command_name = 4; + msg->cmd_type = 0; + msg->dfc_seq = htons(seq); + msg->type = type; + msg->ver = 2; + msg->transaction_id = htonl(qos->tran_num); + + skb->dev = qos->real_dev; + skb->protocol = htons(ETH_P_MAP); + + trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num); + qos->tran_num++; + + rmnet_map_tx_qmap_cmd(skb); +} + +int dfc_bearer_flow_ctl(struct net_device *dev, + struct rmnet_bearer_map *bearer, + struct qos_info *qos) +{ + struct rmnet_flow_map *itm; + int rc = 0, qlen; + int enable; + + enable = bearer->grant_size ? 1 : 0; + + list_for_each_entry(itm, &qos->flow_head, list) { + if (itm->bearer_id == bearer->bearer_id) { + /* + * Do not flow disable ancillary q if ancillary is true + */ + if (bearer->tcp_bidir && enable == 0 && + DFC_IS_ANCILLARY(itm->ip_type)) + continue; + + qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle, + enable); + trace_dfc_qmi_tc(dev->name, itm->bearer_id, + itm->flow_id, bearer->grant_size, + qlen, itm->tcm_handle, enable); + rc++; + } + } + + if (enable == 0 && bearer->ack_req) + dfc_send_ack(dev, bearer->bearer_id, + bearer->seq, qos->mux_id, + DFC_ACK_TYPE_DISABLE); + + return rc; +} + +static int dfc_all_bearer_flow_ctl(struct net_device *dev, + struct qos_info *qos, u8 ack_req, u32 ancillary, + struct dfc_flow_status_info_type_v01 *fc_info) +{ + struct rmnet_bearer_map *bearer_itm; + struct rmnet_flow_map *flow_itm; + int rc = 0, qlen; + bool enable; + + enable = fc_info->num_bytes > 0 ? 
1 : 0; + + list_for_each_entry(bearer_itm, &qos->bearer_head, list) { + bearer_itm->grant_size = fc_info->num_bytes; + bearer_itm->grant_thresh = + qmi_rmnet_grant_per(bearer_itm->grant_size); + bearer_itm->seq = fc_info->seq_num; + bearer_itm->ack_req = ack_req; + bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary); + bearer_itm->last_grant = fc_info->num_bytes; + bearer_itm->last_seq = fc_info->seq_num; + } + + list_for_each_entry(flow_itm, &qos->flow_head, list) { + qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle, + enable); + trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id, + flow_itm->flow_id, fc_info->num_bytes, + qlen, flow_itm->tcm_handle, enable); + rc++; + } + + if (enable == 0 && ack_req) + dfc_send_ack(dev, fc_info->bearer_id, + fc_info->seq_num, fc_info->mux_id, + DFC_ACK_TYPE_DISABLE); + + return rc; +} + +static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos, + u8 ack_req, u32 ancillary, + struct dfc_flow_status_info_type_v01 *fc_info) +{ + struct rmnet_bearer_map *itm = NULL; + int rc = 0; + bool action = false; + + itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id); + if (itm) { + /* The RAT switch flag indicates the start and end of + * the switch. Ignore indications in between. + */ + if (DFC_IS_RAT_SWITCH(ancillary)) + itm->rat_switch = !fc_info->num_bytes; + else + if (itm->rat_switch) + return 0; + + /* If TX is OFF but we received grant, ignore it */ + if (itm->tx_off && fc_info->num_bytes > 0) + return 0; + + if ((itm->grant_size == 0 && fc_info->num_bytes > 0) || + (itm->grant_size > 0 && fc_info->num_bytes == 0)) + action = true; + + itm->grant_size = fc_info->num_bytes; + itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size); + itm->seq = fc_info->seq_num; + itm->ack_req = ack_req; + itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary); + itm->last_grant = fc_info->num_bytes; + itm->last_seq = fc_info->seq_num; + + if (action) + rc = dfc_bearer_flow_ctl(dev, itm, qos); + } else { + pr_debug("grant %u before flow activate", fc_info->num_bytes); + qos->default_grant = fc_info->num_bytes; + } + return rc; +} + +static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc, + struct dfc_svc_ind *svc_ind) +{ + struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info; + struct net_device *dev; + struct qos_info *qos; + struct dfc_flow_status_info_type_v01 *flow_status; + struct dfc_ancillary_info_type_v01 *ai; + u8 ack_req = ind->eod_ack_reqd_valid ? 
ind->eod_ack_reqd : 0; + u32 ancillary; + int i, j; + + rcu_read_lock(); + + for (i = 0; i < ind->flow_status_len; i++) { + flow_status = &ind->flow_status[i]; + + ancillary = 0; + if (ind->ancillary_info_valid) { + for (j = 0; j < ind->ancillary_info_len; j++) { + ai = &ind->ancillary_info[j]; + if (ai->mux_id == flow_status->mux_id && + ai->bearer_id == flow_status->bearer_id) { + ancillary = ai->reserved; + break; + } + } + } + + trace_dfc_flow_ind(dfc->index, + i, flow_status->mux_id, + flow_status->bearer_id, + flow_status->num_bytes, + flow_status->seq_num, + ack_req, + ancillary); + + dev = rmnet_get_rmnet_dev(dfc->rmnet_port, + flow_status->mux_id); + if (!dev) + goto clean_out; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + continue; + + spin_lock_bh(&qos->qos_lock); + + if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) { + spin_unlock_bh(&qos->qos_lock); + continue; + } + + if (unlikely(flow_status->bearer_id == 0xFF)) + dfc_all_bearer_flow_ctl( + dev, qos, ack_req, ancillary, flow_status); + else + dfc_update_fc_map( + dev, qos, ack_req, ancillary, flow_status); + + spin_unlock_bh(&qos->qos_lock); + } + +clean_out: + rcu_read_unlock(); +} + +static void dfc_update_tx_link_status(struct net_device *dev, + struct qos_info *qos, u8 tx_status, + struct dfc_bearer_info_type_v01 *binfo) +{ + struct rmnet_bearer_map *itm = NULL; + + itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id); + if (!itm) + return; + + if (itm->grant_size && !tx_status) { + itm->grant_size = 0; + itm->tcp_bidir = false; + dfc_bearer_flow_ctl(dev, itm, qos); + } else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) { + itm->grant_size = DEFAULT_GRANT; + itm->grant_thresh = DEFAULT_GRANT; + itm->seq = 0; + itm->ack_req = 0; + dfc_bearer_flow_ctl(dev, itm, qos); + } + + itm->tx_off = !tx_status; +} + +static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc, + struct dfc_svc_ind *svc_ind) +{ + struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status; + struct net_device *dev; + struct qos_info *qos; + struct dfc_bearer_info_type_v01 *bearer_info; + int i; + + rcu_read_lock(); + + for (i = 0; i < ind->bearer_info_len; i++) { + bearer_info = &ind->bearer_info[i]; + + trace_dfc_tx_link_status_ind(dfc->index, i, + ind->tx_status, + bearer_info->mux_id, + bearer_info->bearer_id); + + dev = rmnet_get_rmnet_dev(dfc->rmnet_port, + bearer_info->mux_id); + if (!dev) + goto clean_out; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + continue; + + spin_lock_bh(&qos->qos_lock); + + dfc_update_tx_link_status( + dev, qos, ind->tx_status, bearer_info); + + spin_unlock_bh(&qos->qos_lock); + } + +clean_out: + rcu_read_unlock(); +} + +static void dfc_qmi_ind_work(struct work_struct *work) +{ + struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data, + qmi_ind_work); + struct dfc_svc_ind *svc_ind; + unsigned long flags; + + if (!dfc) + return; + + local_bh_disable(); + + do { + spin_lock_irqsave(&dfc->qmi_ind_lock, flags); + svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q, + struct dfc_svc_ind, list); + if (svc_ind) + list_del(&svc_ind->list); + spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags); + + if (!svc_ind) + break; + + if (!dfc->restart_state) { + if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01) + dfc_do_burst_flow_control(dfc, svc_ind); + else if (svc_ind->msg_id == + QMI_DFC_TX_LINK_STATUS_IND_V01) + dfc_handle_tx_link_status_ind(dfc, svc_ind); + } + kfree(svc_ind); + } while (1); + + local_bh_enable(); + + 
qmi_rmnet_set_dl_msg_active(dfc->rmnet_port); +} + +static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data, + handle); + struct dfc_flow_status_ind_msg_v01 *ind_msg; + struct dfc_svc_ind *svc_ind; + unsigned long flags; + + if (qmi != &dfc->handle) + return; + + ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data; + if (ind_msg->flow_status_valid) { + if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) { + pr_err("%s() Invalid fc info len: %d\n", + __func__, ind_msg->flow_status_len); + return; + } + + svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC); + if (!svc_ind) + return; + + svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01; + memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg)); + + spin_lock_irqsave(&dfc->qmi_ind_lock, flags); + list_add_tail(&svc_ind->list, &dfc->qmi_ind_q); + spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags); + + queue_work(dfc->dfc_wq, &dfc->qmi_ind_work); + } +} + +static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, const void *data) +{ + struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data, + handle); + struct dfc_tx_link_status_ind_msg_v01 *ind_msg; + struct dfc_svc_ind *svc_ind; + unsigned long flags; + + if (qmi != &dfc->handle) + return; + + ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data; + if (ind_msg->bearer_info_valid) { + if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) { + pr_err("%s() Invalid bearer info len: %d\n", + __func__, ind_msg->bearer_info_len); + return; + } + + svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC); + if (!svc_ind) + return; + + svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01; + memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg)); + + spin_lock_irqsave(&dfc->qmi_ind_lock, flags); + list_add_tail(&svc_ind->list, &dfc->qmi_ind_q); + spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags); + + queue_work(dfc->dfc_wq, &dfc->qmi_ind_work); + } +} + +static void dfc_svc_init(struct work_struct *work) +{ + int rc = 0; + struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data, + svc_arrive); + struct qmi_info *qmi; + + if (data->restart_state == 1) + return; + + rc = dfc_init_service(data); + if (rc < 0) { + pr_err("%s Failed to init service, err[%d]\n", __func__, rc); + return; + } + + if (data->restart_state == 1) + return; + while (!rtnl_trylock()) { + if (!data->restart_state) + cond_resched(); + else + return; + } + qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port); + if (!qmi) { + rtnl_unlock(); + return; + } + + qmi->dfc_pending[data->index] = NULL; + qmi->dfc_clients[data->index] = (void *)data; + trace_dfc_client_state_up(data->index, + data->svc.instance, + data->svc.ep_type, + data->svc.iface_id); + + rtnl_unlock(); + + pr_info("Connection established with the DFC Service\n"); +} + +static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data, + handle); + + data->ssctl.sq_family = AF_QIPCRTR; + data->ssctl.sq_node = svc->node; + data->ssctl.sq_port = svc->port; + + queue_work(data->dfc_wq, &data->svc_arrive); + + return 0; +} + +static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data, + handle); + + if (!data) + pr_debug("%s() data is null\n", __func__); +} + +static struct qmi_ops server_ops = { + 
.new_server = dfc_svc_arrive, + .del_server = dfc_svc_exit, +}; + +static struct qmi_msg_handler qmi_indication_handler[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_DFC_FLOW_STATUS_IND_V01, + .ei = dfc_flow_status_ind_v01_ei, + .decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01), + .fn = dfc_clnt_ind_cb, + }, + { + .type = QMI_INDICATION, + .msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01, + .ei = dfc_tx_link_status_ind_v01_ei, + .decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01), + .fn = dfc_tx_link_status_ind_cb, + }, + {}, +}; + +int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc, + struct qmi_info *qmi) +{ + struct dfc_qmi_data *data; + int rc = -ENOMEM; + + if (!port || !qmi) + return -EINVAL; + + data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->rmnet_port = port; + data->index = index; + data->restart_state = 0; + memcpy(&data->svc, psvc, sizeof(data->svc)); + + INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work); + INIT_LIST_HEAD(&data->qmi_ind_q); + spin_lock_init(&data->qmi_ind_lock); + + data->dfc_wq = create_singlethread_workqueue("dfc_wq"); + if (!data->dfc_wq) { + pr_err("%s Could not create workqueue\n", __func__); + goto err0; + } + + INIT_WORK(&data->svc_arrive, dfc_svc_init); + rc = qmi_handle_init(&data->handle, + QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN, + &server_ops, qmi_indication_handler); + if (rc < 0) { + pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc); + goto err1; + } + + rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01, + DFC_SERVICE_VERS_V01, + psvc->instance); + if (rc < 0) { + pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc); + goto err2; + } + + qmi->dfc_pending[index] = (void *)data; + + return 0; + +err2: + qmi_handle_release(&data->handle); +err1: + destroy_workqueue(data->dfc_wq); +err0: + kfree(data); + return rc; +} + +void dfc_qmi_client_exit(void *dfc_data) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + + if (!data) { + pr_err("%s() data is null\n", __func__); + return; + } + + data->restart_state = 1; + trace_dfc_client_state_down(data->index, 0); + qmi_handle_release(&data->handle); + + drain_workqueue(data->dfc_wq); + destroy_workqueue(data->dfc_wq); + kfree(data); +} + +void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, + int ip_type, u32 mark, unsigned int len) +{ + struct rmnet_bearer_map *bearer; + struct rmnet_flow_map *itm; + u32 start_grant; + + spin_lock_bh(&qos->qos_lock); + + itm = qmi_rmnet_get_flow_map(qos, mark, ip_type); + if (unlikely(!itm)) + goto out; + + bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id); + if (unlikely(!bearer)) + goto out; + + trace_dfc_flow_check(dev->name, bearer->bearer_id, + len, bearer->grant_size); + + if (!bearer->grant_size) + goto out; + + start_grant = bearer->grant_size; + if (len >= bearer->grant_size) + bearer->grant_size = 0; + else + bearer->grant_size -= len; + + if (start_grant > bearer->grant_thresh && + bearer->grant_size <= bearer->grant_thresh) { + dfc_send_ack(dev, bearer->bearer_id, + bearer->seq, qos->mux_id, + DFC_ACK_TYPE_THRESHOLD); + } + + if (!bearer->grant_size) + dfc_bearer_flow_ctl(dev, bearer, qos); + +out: + spin_unlock_bh(&qos->qos_lock); +} + +void dfc_qmi_query_flow(void *dfc_data) +{ + struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data; + struct dfc_get_flow_status_resp_msg_v01 *resp; + struct dfc_svc_ind *svc_ind; + int rc; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) + return; + + svc_ind = 
kzalloc(sizeof(*svc_ind), GFP_ATOMIC); + if (!svc_ind) { + kfree(resp); + return; + } + + if (!data) + goto done; + + rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp); + + if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 || + resp->flow_status_len > DFC_MAX_BEARERS_V01) + goto done; + + svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid; + svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len; + memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status, + sizeof(resp->flow_status[0]) * resp->flow_status_len); + dfc_do_burst_flow_control(data, svc_ind); + +done: + kfree(svc_ind); + kfree(resp); +} diff --git a/core/qmi_rmnet.c b/core/qmi_rmnet.c new file mode 100644 index 0000000000..9de6769011 --- /dev/null +++ b/core/qmi_rmnet.c @@ -0,0 +1,1032 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include + +#include "qmi_rmnet_i.h" +#include "qmi_rmnet.h" +#include "rmnet_qmi.h" +#include +#include +#include +#define CREATE_TRACE_POINTS +#include "dfc.h" +#undef CREATE_TRACE_POINTS +#include +#include +#include +#include + + +#define NLMSG_FLOW_ACTIVATE 1 +#define NLMSG_FLOW_DEACTIVATE 2 +#define NLMSG_CLIENT_SETUP 4 +#define NLMSG_CLIENT_DELETE 5 + +#define FLAG_DFC_MASK 0x000F +#define FLAG_POWERSAVE_MASK 0x0010 +#define DFC_MODE_MULTIQ 2 + +unsigned int rmnet_wq_frequency __read_mostly = 1000; +module_param(rmnet_wq_frequency, uint, 0644); +MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check in ms"); + +#define PS_WORK_ACTIVE_BIT 0 +#define PS_INTERVAL (((!rmnet_wq_frequency) ? \ + 1 : rmnet_wq_frequency/10) * (HZ/100)) +#define NO_DELAY (0x0000 * HZ) + +#ifdef CONFIG_QTI_QMI_DFC +static unsigned int qmi_rmnet_scale_factor = 5; +#endif + +struct qmi_elem_info data_ep_id_type_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum data_ep_type_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct data_ep_id_type_v01, + ep_type), + .ei_array = NULL, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct data_ep_id_type_v01, + iface_id), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .elem_len = 0, + .elem_size = 0, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = 0, + .ei_array = NULL, + }, +}; +EXPORT_SYMBOL(data_ep_id_type_v01_ei); + +void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi) +{ + int i; + + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ)) + return NULL; + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + if (qmi->dfc_clients[i]) + return qmi->dfc_clients[i]; + } + + return NULL; +} + +static inline int +qmi_rmnet_has_client(struct qmi_info *qmi) +{ + if (qmi->wda_client) + return 1; + + return qmi_rmnet_has_dfc_client(qmi) ? 
1 : 0; +} + +static int +qmi_rmnet_has_pending(struct qmi_info *qmi) +{ + int i; + + if (qmi->wda_pending) + return 1; + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + if (qmi->dfc_pending[i]) + return 1; + } + + return 0; +} + +#ifdef CONFIG_QTI_QMI_DFC +static void +qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev, + struct qos_info *qos) +{ + struct rmnet_bearer_map *bearer, *br_tmp; + struct rmnet_flow_map *itm, *fl_tmp; + + ASSERT_RTNL(); + + list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) { + list_del(&itm->list); + kfree(itm); + } + + list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) { + list_del(&bearer->list); + kfree(bearer); + } +} + +struct rmnet_flow_map * +qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type) +{ + struct rmnet_flow_map *itm; + + if (!qos) + return NULL; + + list_for_each_entry(itm, &qos->flow_head, list) { + if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type)) + return itm; + } + return NULL; +} + +struct rmnet_bearer_map * +qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id) +{ + struct rmnet_bearer_map *itm; + + if (!qos) + return NULL; + + list_for_each_entry(itm, &qos->bearer_head, list) { + if (itm->bearer_id == bearer_id) + return itm; + } + return NULL; +} + +static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm, + struct rmnet_flow_map *new_map) +{ + itm->bearer_id = new_map->bearer_id; + itm->flow_id = new_map->flow_id; + itm->ip_type = new_map->ip_type; + itm->tcm_handle = new_map->tcm_handle; +} + +int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable) +{ + struct netdev_queue *q; + + if (unlikely(tcm_handle >= dev->num_tx_queues)) + return 0; + + q = netdev_get_tx_queue(dev, tcm_handle); + if (unlikely(!q)) + return 0; + + if (enable) + netif_tx_wake_queue(q); + else + netif_tx_stop_queue(q); + + return 0; +} + +static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq) +{ + struct Qdisc *qdisc; + + if (unlikely(txq >= dev->num_tx_queues)) + return; + + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc); + if (qdisc) { + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); + } +} + +static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, + struct qmi_info *qmi) +{ + struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); + struct rmnet_flow_map new_map, *itm; + struct rmnet_bearer_map *bearer; + + if (!qos_info) + return -EINVAL; + + ASSERT_RTNL(); + + /* flow activate + * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle + */ + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + new_map.tcm_handle = tcm->tcm_handle; + trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id, + new_map.ip_type, new_map.tcm_handle, 1); + + spin_lock_bh(&qos_info->qos_lock); + + itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, + new_map.ip_type); + if (itm) { + qmi_rmnet_update_flow_map(itm, &new_map); + } else { + itm = kzalloc(sizeof(*itm), GFP_ATOMIC); + if (!itm) { + spin_unlock_bh(&qos_info->qos_lock); + return -ENOMEM; + } + + qmi_rmnet_update_flow_map(itm, &new_map); + list_add(&itm->list, &qos_info->flow_head); + + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id); + if (bearer) { + bearer->flow_ref++; + } else { + bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC); + if (!bearer) { + 
spin_unlock_bh(&qos_info->qos_lock); + return -ENOMEM; + } + + bearer->bearer_id = new_map.bearer_id; + bearer->flow_ref = 1; + bearer->grant_size = qos_info->default_grant; + bearer->grant_thresh = + qmi_rmnet_grant_per(bearer->grant_size); + qos_info->default_grant = DEFAULT_GRANT; + list_add(&bearer->list, &qos_info->bearer_head); + } + + qmi_rmnet_flow_control(dev, itm->tcm_handle, + bearer->grant_size > 0 ? 1 : 0); + + trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id, + bearer->grant_size, 0, itm->tcm_handle, + bearer->grant_size > 0 ? 1 : 0); + } + + spin_unlock_bh(&qos_info->qos_lock); + + return 0; +} + +static int +qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, + struct qmi_info *qmi) +{ + struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev); + struct rmnet_flow_map new_map, *itm; + struct rmnet_bearer_map *bearer; + + if (!qos_info) + return -EINVAL; + + ASSERT_RTNL(); + + /* flow deactivate + * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id, + * tcm->tcm_ifindex - ip_type + */ + + spin_lock_bh(&qos_info->qos_lock); + + new_map.bearer_id = tcm->tcm__pad1; + new_map.flow_id = tcm->tcm_parent; + new_map.ip_type = tcm->tcm_ifindex; + itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id, + new_map.ip_type); + if (itm) { + trace_dfc_flow_info(dev->name, new_map.bearer_id, + new_map.flow_id, new_map.ip_type, + itm->tcm_handle, 0); + list_del(&itm->list); + + /*clear bearer map*/ + bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id); + if (bearer && --bearer->flow_ref == 0) { + list_del(&bearer->list); + kfree(bearer); + + /* Purge pending packets for dedicated flow */ + if (itm->flow_id) + qmi_rmnet_reset_txq(dev, itm->tcm_handle); + } + + /* Enable flow to allow new flow setup */ + qmi_rmnet_flow_control(dev, itm->tcm_handle, 1); + trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id, + 0, 0, itm->tcm_handle, 1); + + kfree(itm); + } + + if (list_empty(&qos_info->flow_head)) { + netif_tx_wake_all_queues(dev); + trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1); + } + + spin_unlock_bh(&qos_info->qos_lock); + + return 0; +} + +static void qmi_rmnet_query_flows(struct qmi_info *qmi) +{ + int i; + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + if (qmi->dfc_clients[i]) + dfc_qmi_query_flow(qmi->dfc_clients[i]); + } +} + +static int qmi_rmnet_set_scale_factor(const char *val, + const struct kernel_param *kp) +{ + int ret; + unsigned int num = 0; + + ret = kstrtouint(val, 10, &num); + if (ret != 0 || num == 0) + return -EINVAL; + + return param_set_uint(val, kp); +} + +static const struct kernel_param_ops qmi_rmnet_scale_ops = { + .set = qmi_rmnet_set_scale_factor, + .get = param_get_uint, +}; + +module_param_cb(qmi_rmnet_scale_factor, &qmi_rmnet_scale_ops, + &qmi_rmnet_scale_factor, 0664); +#else +static inline void +qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev, + struct rmnet_flow_map *itm, int add_flow) +{ +} + +static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos) +{ +} + +static inline void +qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm, + struct rmnet_flow_map *new_map) +{ +} + +static inline int +qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm, + struct qmi_info *qmi) +{ + return -EINVAL; +} + +static inline int +qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm, + struct qmi_info *qmi) +{ + return -EINVAL; +} + +static inline void qmi_rmnet_query_flows(struct qmi_info *qmi) +{ +} +#endif + +static int +qmi_rmnet_setup_client(void *port, struct 
qmi_info *qmi, struct tcmsg *tcm) +{ + int idx, rc, err = 0; + struct svc_info svc; + + ASSERT_RTNL(); + + /* client setup + * tcm->tcm_handle - instance, tcm->tcm_info - ep_type, + * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags + */ + idx = (tcm->tcm_handle == 0) ? 0 : 1; + + if (!qmi) { + qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC); + if (!qmi) + return -ENOMEM; + + rmnet_init_qmi_pt(port, qmi); + } + + qmi->flag = tcm->tcm_ifindex; + svc.instance = tcm->tcm_handle; + svc.ep_type = tcm->tcm_info; + svc.iface_id = tcm->tcm_parent; + + if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) && + !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) { + rc = dfc_qmi_client_init(port, idx, &svc, qmi); + if (rc < 0) + err = rc; + } + + if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) && + (idx == 0) && !qmi->wda_client && !qmi->wda_pending) { + rc = wda_qmi_client_init(port, &svc, qmi); + if (rc < 0) + err = rc; + } + + return err; +} + +static int +__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx) +{ + void *data = NULL; + + ASSERT_RTNL(); + + if (qmi->dfc_clients[idx]) + data = qmi->dfc_clients[idx]; + else if (qmi->dfc_pending[idx]) + data = qmi->dfc_pending[idx]; + + if (data) { + dfc_qmi_client_exit(data); + qmi->dfc_clients[idx] = NULL; + qmi->dfc_pending[idx] = NULL; + } + + if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) { + rmnet_reset_qmi_pt(port); + kfree(qmi); + return 0; + } + + return 1; +} + +static void +qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm) +{ + int idx; + void *data = NULL; + + /* client delete: tcm->tcm_handle - instance*/ + idx = (tcm->tcm_handle == 0) ? 0 : 1; + + ASSERT_RTNL(); + if (qmi->wda_client) + data = qmi->wda_client; + else if (qmi->wda_pending) + data = qmi->wda_pending; + + if ((idx == 0) && data) { + wda_qmi_client_exit(data); + qmi->wda_client = NULL; + qmi->wda_pending = NULL; + } else { + qmi_rmnet_flush_ps_wq(); + } + + __qmi_rmnet_delete_client(port, qmi, idx); +} + +void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) +{ + struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + struct tcmsg *tcm = (struct tcmsg *)tcm_pt; + + switch (tcm->tcm_family) { + case NLMSG_FLOW_ACTIVATE: + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) || + !qmi_rmnet_has_dfc_client(qmi)) + return; + + qmi_rmnet_add_flow(dev, tcm, qmi); + break; + case NLMSG_FLOW_DEACTIVATE: + if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ)) + return; + + qmi_rmnet_del_flow(dev, tcm, qmi); + break; + case NLMSG_CLIENT_SETUP: + if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) && + !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK)) + return; + + if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) { + /* retrieve qmi again as it could have been changed */ + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (qmi && + !qmi_rmnet_has_client(qmi) && + !qmi_rmnet_has_pending(qmi)) { + rmnet_reset_qmi_pt(port); + kfree(qmi); + } + } else if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) { + qmi_rmnet_work_init(port); + rmnet_set_powersave_format(port); + } + break; + case NLMSG_CLIENT_DELETE: + if (!qmi) + return; + if (tcm->tcm_handle == 0) { /* instance 0 */ + rmnet_clear_powersave_format(port); + qmi_rmnet_work_exit(port); + } + qmi_rmnet_delete_client(port, qmi, tcm); + break; + default: + pr_debug("%s(): No handler\n", __func__); + break; + } +} +EXPORT_SYMBOL(qmi_rmnet_change_link); + +void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) +{ + struct qmi_info 
*qmi = (struct qmi_info *)qmi_pt; + int i; + void *data = NULL; + + if (!qmi) + return; + + ASSERT_RTNL(); + + qmi_rmnet_work_exit(port); + + if (qmi->wda_client) + data = qmi->wda_client; + else if (qmi->wda_pending) + data = qmi->wda_pending; + + if (data) { + wda_qmi_client_exit(data); + qmi->wda_client = NULL; + qmi->wda_pending = NULL; + } + + for (i = 0; i < MAX_CLIENT_NUM; i++) { + if (!__qmi_rmnet_delete_client(port, qmi, i)) + return; + } +} +EXPORT_SYMBOL(qmi_rmnet_qmi_exit); + +void qmi_rmnet_enable_all_flows(struct net_device *dev) +{ + struct qos_info *qos; + struct rmnet_bearer_map *bearer; + bool do_wake; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + return; + + spin_lock_bh(&qos->qos_lock); + + list_for_each_entry(bearer, &qos->bearer_head, list) { + if (bearer->tx_off) + continue; + do_wake = !bearer->grant_size; + bearer->grant_size = DEFAULT_GRANT; + bearer->grant_thresh = DEFAULT_GRANT; + bearer->seq = 0; + bearer->ack_req = 0; + bearer->tcp_bidir = false; + bearer->rat_switch = false; + + if (do_wake) + dfc_bearer_flow_ctl(dev, bearer, qos); + } + + spin_unlock_bh(&qos->qos_lock); +} +EXPORT_SYMBOL(qmi_rmnet_enable_all_flows); + +bool qmi_rmnet_all_flows_enabled(struct net_device *dev) +{ + struct qos_info *qos; + struct rmnet_bearer_map *bearer; + bool ret = true; + + qos = (struct qos_info *)rmnet_get_qos_pt(dev); + if (!qos) + return true; + + spin_lock_bh(&qos->qos_lock); + + list_for_each_entry(bearer, &qos->bearer_head, list) { + if (!bearer->grant_size) { + ret = false; + break; + } + } + + spin_unlock_bh(&qos->qos_lock); + + return ret; +} +EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled); + +#ifdef CONFIG_QTI_QMI_DFC +void qmi_rmnet_burst_fc_check(struct net_device *dev, + int ip_type, u32 mark, unsigned int len) +{ + struct qos_info *qos = rmnet_get_qos_pt(dev); + + if (!qos) + return; + + dfc_qmi_burst_check(dev, qos, ip_type, mark, len); +} +EXPORT_SYMBOL(qmi_rmnet_burst_fc_check); + +int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct qos_info *qos = rmnet_get_qos_pt(dev); + int txq = 0, ip_type = AF_INET; + unsigned int len = skb->len; + struct rmnet_flow_map *itm; + u32 mark = skb->mark; + + if (!qos) + return 0; + + switch (skb->protocol) { + /* TCPv4 ACKs */ + case htons(ETH_P_IP): + ip_type = AF_INET; + if ((!mark) && + (ip_hdr(skb)->protocol == IPPROTO_TCP) && + (len == 40 || len == 52) && + (ip_hdr(skb)->ihl == 5) && + ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK)) + return 1; + break; + + /* TCPv6 ACKs */ + case htons(ETH_P_IPV6): + ip_type = AF_INET6; + if ((!mark) && + (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) && + (len == 60 || len == 72) && + ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK)) + return 1; + /* Fall through */ + } + + /* Default flows */ + if (!mark) + return 0; + + /* Dedicated flows */ + spin_lock_bh(&qos->qos_lock); + + itm = qmi_rmnet_get_flow_map(qos, mark, ip_type); + if (unlikely(!itm)) + goto done; + + txq = itm->tcm_handle; + +done: + spin_unlock_bh(&qos->qos_lock); + return txq; +} +EXPORT_SYMBOL(qmi_rmnet_get_queue); + +inline unsigned int qmi_rmnet_grant_per(unsigned int grant) +{ + return grant / qmi_rmnet_scale_factor; +} +EXPORT_SYMBOL(qmi_rmnet_grant_per); + +void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) +{ + struct qos_info *qos; + + qos = kmalloc(sizeof(*qos), GFP_KERNEL); + if (!qos) + return NULL; + + qos->mux_id = mux_id; + qos->real_dev = real_dev; + qos->default_grant = DEFAULT_GRANT; + qos->tran_num = 0; + 
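/*
 * [Editorial sketch -- not part of this patch] The pure-ACK check in
 * qmi_rmnet_get_queue() above keys off the total packet length:
 *   IPv4: 20 (minimal IP header, ihl == 5) + 20 (minimal TCP header) = 40,
 *         or 52 with the 12-byte (padded) TCP timestamp option;
 *   IPv6: 40 (fixed header) + 20 = 60, or 72 with the timestamp option;
 * combined with a flag-word mask so that only the ACK flag may be set.
 * A hypothetical helper spelling out that length arithmetic:
 */
#include <linux/types.h>

static inline bool my_is_pure_ack_len(bool ipv6, unsigned int len)
{
	unsigned int l3 = ipv6 ? 40 : 20;	/* network-layer header */
	unsigned int l4 = 20;			/* minimal TCP header */
	unsigned int ts = 12;			/* TCP timestamp option, padded */

	return len == l3 + l4 || len == l3 + l4 + ts;
}
/* [End of editorial sketch] */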
INIT_LIST_HEAD(&qos->flow_head); + INIT_LIST_HEAD(&qos->bearer_head); + spin_lock_init(&qos->qos_lock); + + return qos; +} +EXPORT_SYMBOL(qmi_rmnet_qos_init); + +void qmi_rmnet_qos_exit(struct net_device *dev, void *qos) +{ + void *port = rmnet_get_rmnet_port(dev); + struct qmi_info *qmi = rmnet_get_qmi_pt(port); + struct qos_info *qos_info = (struct qos_info *)qos; + + if (!qmi || !qos) + return; + + qmi_rmnet_clean_flow_list(qmi, dev, qos_info); + kfree(qos); +} +EXPORT_SYMBOL(qmi_rmnet_qos_exit); +#endif + +#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE +static struct workqueue_struct *rmnet_ps_wq; +static struct rmnet_powersave_work *rmnet_work; +static LIST_HEAD(ps_list); + +struct rmnet_powersave_work { + struct delayed_work work; + void *port; + u64 old_rx_pkts; + u64 old_tx_pkts; +}; + +void qmi_rmnet_ps_on_notify(void *port) +{ + struct qmi_rmnet_ps_ind *tmp; + + list_for_each_entry(tmp, &ps_list, list) + tmp->ps_on_handler(port); +} +EXPORT_SYMBOL(qmi_rmnet_ps_on_notify); + +void qmi_rmnet_ps_off_notify(void *port) +{ + struct qmi_rmnet_ps_ind *tmp; + + list_for_each_entry(tmp, &ps_list, list) + tmp->ps_off_handler(port); +} +EXPORT_SYMBOL(qmi_rmnet_ps_off_notify); + +int qmi_rmnet_ps_ind_register(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + + if (!port || !ps_ind || !ps_ind->ps_on_handler || + !ps_ind->ps_off_handler) + return -EINVAL; + + list_add_rcu(&ps_ind->list, &ps_list); + + return 0; +} +EXPORT_SYMBOL(qmi_rmnet_ps_ind_register); + +int qmi_rmnet_ps_ind_deregister(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + struct qmi_rmnet_ps_ind *tmp; + + if (!port || !ps_ind) + return -EINVAL; + + list_for_each_entry(tmp, &ps_list, list) { + if (tmp == ps_ind) { + list_del_rcu(&ps_ind->list); + goto done; + } + } + +done: + return 0; +} +EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister); + +int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable) +{ + int rc = -EINVAL; + struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + + if (!qmi || !qmi->wda_client) + return rc; + + rc = wda_set_powersave_mode(qmi->wda_client, enable); + if (rc < 0) { + pr_err("%s() failed set powersave mode[%u], err=%d\n", + __func__, enable, rc); + return rc; + } + + return 0; +} +EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode); + +static void qmi_rmnet_work_restart(void *port) +{ + if (!rmnet_ps_wq || !rmnet_work) + return; + queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY); +} + +static void qmi_rmnet_check_stats(struct work_struct *work) +{ + struct rmnet_powersave_work *real_work; + struct qmi_info *qmi; + u64 rxd, txd; + u64 rx, tx; + bool dl_msg_active; + + real_work = container_of(to_delayed_work(work), + struct rmnet_powersave_work, work); + + if (unlikely(!real_work || !real_work->port)) + return; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port); + if (unlikely(!qmi)) + return; + + if (qmi->ps_enabled) { + + /* Ready to accept grant */ + qmi->ps_ignore_grant = false; + + /* Register to get QMI DFC and DL marker */ + if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) { + /* If this failed need to retry quickly */ + queue_delayed_work(rmnet_ps_wq, + &real_work->work, HZ / 50); + return; + + } + qmi->ps_enabled = false; + + /* Do a query when coming out of powersave */ + qmi_rmnet_query_flows(qmi); + + if (rmnet_get_powersave_notif(real_work->port)) + qmi_rmnet_ps_off_notify(real_work->port); + + goto end; + } + + rmnet_get_packets(real_work->port, &rx, &tx); + rxd = rx - real_work->old_rx_pkts; + txd = tx - real_work->old_tx_pkts; + real_work->old_rx_pkts = 
rx; + real_work->old_tx_pkts = tx; + + dl_msg_active = qmi->dl_msg_active; + qmi->dl_msg_active = false; + + if (!rxd && !txd) { + /* If no DL msg received and there is a flow disabled, + * (likely in RLF), no need to enter powersave + */ + if (!dl_msg_active && + !rmnet_all_flows_enabled(real_work->port)) + goto end; + + /* Deregister to suppress QMI DFC and DL marker */ + if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) { + queue_delayed_work(rmnet_ps_wq, + &real_work->work, PS_INTERVAL); + return; + } + qmi->ps_enabled = true; + + /* Ignore grant after going into powersave */ + qmi->ps_ignore_grant = true; + + /* Clear the bit before enabling flow so pending packets + * can trigger the work again + */ + clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); + rmnet_enable_all_flows(real_work->port); + + if (rmnet_get_powersave_notif(real_work->port)) + qmi_rmnet_ps_on_notify(real_work->port); + + return; + } +end: + queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL); +} + +static void qmi_rmnet_work_set_active(void *port, int status) +{ + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) + return; + + if (status) + set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); + else + clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active); +} + +void qmi_rmnet_work_init(void *port) +{ + if (rmnet_ps_wq) + return; + + rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work", + WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1); + + rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC); + if (!rmnet_work) { + destroy_workqueue(rmnet_ps_wq); + rmnet_ps_wq = NULL; + return; + } + INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats); + rmnet_work->port = port; + rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts, + &rmnet_work->old_tx_pkts); + + qmi_rmnet_work_set_active(rmnet_work->port, 1); + queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL); +} +EXPORT_SYMBOL(qmi_rmnet_work_init); + +void qmi_rmnet_work_maybe_restart(void *port) +{ + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) + return; + + if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) + qmi_rmnet_work_restart(port); +} +EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart); + +void qmi_rmnet_work_exit(void *port) +{ + if (!rmnet_ps_wq || !rmnet_work) + return; + cancel_delayed_work_sync(&rmnet_work->work); + destroy_workqueue(rmnet_ps_wq); + qmi_rmnet_work_set_active(port, 0); + rmnet_ps_wq = NULL; + kfree(rmnet_work); + rmnet_work = NULL; +} +EXPORT_SYMBOL(qmi_rmnet_work_exit); + +void qmi_rmnet_set_dl_msg_active(void *port) +{ + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) + return; + + qmi->dl_msg_active = true; +} +EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active); + +void qmi_rmnet_flush_ps_wq(void) +{ + if (rmnet_ps_wq) + flush_workqueue(rmnet_ps_wq); +} + +bool qmi_rmnet_ignore_grant(void *port) +{ + struct qmi_info *qmi; + + qmi = (struct qmi_info *)rmnet_get_qmi_pt(port); + if (unlikely(!qmi)) + return false; + + return qmi->ps_ignore_grant; +} +EXPORT_SYMBOL(qmi_rmnet_ignore_grant); + +#endif +MODULE_LICENSE("GPL v2"); diff --git a/core/qmi_rmnet.h b/core/qmi_rmnet.h new file mode 100644 index 0000000000..6fb284e2b9 --- /dev/null +++ b/core/qmi_rmnet.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _QMI_RMNET_H +#define _QMI_RMNET_H + +#include +#include +#define CONFIG_QTI_QMI_RMNET 1 +#define CONFIG_QTI_QMI_DFC 1 +#define CONFIG_QTI_QMI_POWER_COLLAPSE 1 + +struct qmi_rmnet_ps_ind { + void (*ps_on_handler)(void *); + void (*ps_off_handler)(void *); + struct list_head list; +}; + + +#ifdef CONFIG_QTI_QMI_RMNET +void qmi_rmnet_qmi_exit(void *qmi_pt, void *port); +void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt); +void qmi_rmnet_enable_all_flows(struct net_device *dev); +bool qmi_rmnet_all_flows_enabled(struct net_device *dev); +#else +static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port) +{ +} + +static inline void +qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt) +{ +} + +static inline void +qmi_rmnet_enable_all_flows(struct net_device *dev) +{ +} + +static inline bool +qmi_rmnet_all_flows_enabled(struct net_device *dev) +{ + return true; +} +#endif + +#ifdef CONFIG_QTI_QMI_DFC +void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id); +void qmi_rmnet_qos_exit(struct net_device *dev, void *qos); +void qmi_rmnet_burst_fc_check(struct net_device *dev, + int ip_type, u32 mark, unsigned int len); +int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb); +#else +static inline void * +qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id) +{ + return NULL; +} + +static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos) +{ +} + +static inline void +qmi_rmnet_burst_fc_check(struct net_device *dev, + int ip_type, u32 mark, unsigned int len) +{ +} + +static inline int qmi_rmnet_get_queue(struct net_device *dev, + struct sk_buff *skb) +{ + return 0; +} +#endif + +#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE +int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable); +void qmi_rmnet_work_init(void *port); +void qmi_rmnet_work_exit(void *port); +void qmi_rmnet_work_maybe_restart(void *port); +void qmi_rmnet_set_dl_msg_active(void *port); +bool qmi_rmnet_ignore_grant(void *port); + +int qmi_rmnet_ps_ind_register(void *port, + struct qmi_rmnet_ps_ind *ps_ind); +int qmi_rmnet_ps_ind_deregister(void *port, + struct qmi_rmnet_ps_ind *ps_ind); +void qmi_rmnet_ps_off_notify(void *port); +void qmi_rmnet_ps_on_notify(void *port); + +#else +static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable) +{ + return 0; +} +static inline void qmi_rmnet_work_init(void *port) +{ +} +static inline void qmi_rmnet_work_exit(void *port) +{ +} +static inline void qmi_rmnet_work_maybe_restart(void *port) +{ + +} +static inline void qmi_rmnet_set_dl_msg_active(void *port) +{ +} +static inline bool qmi_rmnet_ignore_grant(void *port) +{ + return false; +} + +static inline int qmi_rmnet_ps_ind_register(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + return 0; +} +static inline int qmi_rmnet_ps_ind_deregister(void *port, + struct qmi_rmnet_ps_ind *ps_ind) +{ + return 0; +} + +static inline void qmi_rmnet_ps_off_notify(void *port) +{ + +} + +static inline void qmi_rmnet_ps_on_notify(void *port) +{ + +} +#endif +#endif /*_QMI_RMNET_H*/ diff 
--git a/core/qmi_rmnet_i.h b/core/qmi_rmnet_i.h new file mode 100644 index 0000000000..27a34df98a --- /dev/null +++ b/core/qmi_rmnet_i.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_QMI_I_H +#define _RMNET_QMI_I_H + +#include +#include + +#define IP_VER_4 4 +#define IP_VER_6 6 + +#define MAX_CLIENT_NUM 2 +#define MAX_FLOW_NUM 32 +#define DEFAULT_GRANT 1 +#define DFC_MAX_BEARERS_V01 16 + +#define CONFIG_QTI_QMI_RMNET 1 +#define CONFIG_QTI_QMI_DFC 1 +#define CONFIG_QTI_QMI_POWER_COLLAPSE 1 + +struct rmnet_flow_map { + struct list_head list; + u8 bearer_id; + u32 flow_id; + int ip_type; + u32 tcm_handle; +}; + +struct rmnet_bearer_map { + struct list_head list; + u8 bearer_id; + int flow_ref; + u32 grant_size; + u32 grant_thresh; + u16 seq; + u8 ack_req; + u32 last_grant; + u16 last_seq; + bool tcp_bidir; + bool rat_switch; + bool tx_off; +}; + +struct svc_info { + u32 instance; + u32 ep_type; + u32 iface_id; +}; + +struct qos_info { + u8 mux_id; + struct net_device *real_dev; + struct list_head flow_head; + struct list_head bearer_head; + u32 default_grant; + u32 tran_num; + spinlock_t qos_lock; +}; + +struct flow_info { + struct net_device *dev; + struct rmnet_flow_map *itm; +}; + +struct qmi_info { + int flag; + void *wda_client; + void *wda_pending; + void *dfc_clients[MAX_CLIENT_NUM]; + void *dfc_pending[MAX_CLIENT_NUM]; + unsigned long ps_work_active; + bool ps_enabled; + bool dl_msg_active; + bool ps_ignore_grant; +}; + +enum data_ep_type_enum_v01 { + DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN, + DATA_EP_TYPE_RESERVED_V01 = 0x00, + DATA_EP_TYPE_HSIC_V01 = 0x01, + DATA_EP_TYPE_HSUSB_V01 = 0x02, + DATA_EP_TYPE_PCIE_V01 = 0x03, + DATA_EP_TYPE_EMBEDDED_V01 = 0x04, + DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX +}; + +struct data_ep_id_type_v01 { + + enum data_ep_type_enum_v01 ep_type; + u32 iface_id; +}; + +extern struct qmi_elem_info data_ep_id_type_v01_ei[]; + +void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi); + +#ifdef CONFIG_QTI_QMI_DFC +struct rmnet_flow_map * +qmi_rmnet_get_flow_map(struct qos_info *qos_info, + u32 flow_id, int ip_type); + +struct rmnet_bearer_map * +qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id); + +unsigned int qmi_rmnet_grant_per(unsigned int grant); + +int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc, + struct qmi_info *qmi); + +void dfc_qmi_client_exit(void *dfc_data); + +void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, + int ip_type, u32 mark, unsigned int len); + +int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable); + +void dfc_qmi_query_flow(void *dfc_data); + +int dfc_bearer_flow_ctl(struct net_device *dev, + struct rmnet_bearer_map *bearer, + struct qos_info *qos); +#else +static inline struct rmnet_flow_map * +qmi_rmnet_get_flow_map(struct qos_info *qos_info, + uint32_t flow_id, int ip_type) +{ + return NULL; +} + +static inline struct rmnet_bearer_map * +qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id) +{ + return 
NULL; +} + +static inline int +dfc_qmi_client_init(void *port, int index, struct svc_info *psvc, + struct qmi_info *qmi) +{ + return -EINVAL; +} + +static inline void dfc_qmi_client_exit(void *dfc_data) +{ +} + +static inline void +dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos, + int ip_type, u32 mark, unsigned int len) +{ +} + +static inline void +dfc_qmi_query_flow(void *dfc_data) +{ +} + +static inline int +dfc_bearer_flow_ctl(struct net_device *dev, + struct rmnet_bearer_map *bearer, + struct qos_info *qos) +{ + return 0; +} +#endif + +#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE +int +wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi); +void wda_qmi_client_exit(void *wda_data); +int wda_set_powersave_mode(void *wda_data, u8 enable); +void qmi_rmnet_flush_ps_wq(void); +#else +static inline int +wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi) +{ + return -EINVAL; +} + +static inline void wda_qmi_client_exit(void *wda_data) +{ +} + +static inline int wda_set_powersave_mode(void *wda_data, u8 enable) +{ + return -EINVAL; +} +static inline void qmi_rmnet_flush_ps_wq(void) +{ +} +#endif +#endif /*_RMNET_QMI_I_H*/ diff --git a/core/rmnet_config.c b/core/rmnet_config.c new file mode 100644 index 0000000000..907acd305c --- /dev/null +++ b/core/rmnet_config.c @@ -0,0 +1,738 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET configuration engine + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_vnd.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_descriptor.h" +#include "rmnet_qmi.h" +#include "qmi_rmnet.h" +#define CONFIG_QTI_QMI_RMNET 1 +#define CONFIG_QTI_QMI_DFC 1 +#define CONFIG_QTI_QMI_POWER_COLLAPSE 1 + +/* Locking scheme - + * The shared resource which needs to be protected is realdev->rx_handler_data. + * For the writer path, this is using rtnl_lock(). The writer paths are + * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These + * paths are already called with rtnl_lock() acquired in. There is also an + * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For + * dereference here, we will need to use rtnl_dereference(). Dev list writing + * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). + * For the reader path, the real_dev->rx_handler_data is called in the TX / RX + * path. We only need rcu_read_lock() for these scenarios. In these cases, + * the rcu_read_lock() is held in __dev_queue_xmit() and + * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() + * to get the relevant information. For dev list reading, we again acquire + * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). + * We also use unregister_netdevice_many() to free all rmnet devices in + * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in + * same context. 
+ */ + +/* Local Definitions and Declarations */ + +enum { + IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX, + IFLA_RMNET_UL_AGG_PARAMS, + __IFLA_RMNET_EXT_MAX, +}; + +static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = { + [IFLA_RMNET_MUX_ID] = { + .type = NLA_U16 + }, + [IFLA_RMNET_FLAGS] = { + .len = sizeof(struct ifla_rmnet_flags) + }, + [IFLA_RMNET_DFC_QOS] = { + .len = sizeof(struct tcmsg) + }, + [IFLA_RMNET_UL_AGG_PARAMS] = { + .len = sizeof(struct rmnet_egress_agg_params) + }, +}; + +int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} +EXPORT_SYMBOL(rmnet_is_real_dev_registered); + +/* Needs rtnl lock */ +static struct rmnet_port* +rmnet_get_port_rtnl(const struct net_device *real_dev) +{ + return rtnl_dereference(real_dev->rx_handler_data); +} + +static int rmnet_unregister_real_device(struct net_device *real_dev, + struct rmnet_port *port) +{ + if (port->nr_rmnet_devs) + return -EINVAL; + + rmnet_map_cmd_exit(port); + rmnet_map_tx_aggregate_exit(port); + + rmnet_descriptor_deinit(port); + + kfree(port); + + netdev_rx_handler_unregister(real_dev); + + /* release reference on real_dev */ + dev_put(real_dev); + + netdev_dbg(real_dev, "Removed from rmnet\n"); + return 0; +} + +static int rmnet_register_real_device(struct net_device *real_dev) +{ + struct rmnet_port *port; + int rc, entry; + + ASSERT_RTNL(); + + if (rmnet_is_real_dev_registered(real_dev)) + return 0; + + port = kzalloc(sizeof(*port), GFP_ATOMIC); + if (!port) + return -ENOMEM; + + port->dev = real_dev; + rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port); + if (rc) { + kfree(port); + return -EBUSY; + } + /* hold on to real dev for MAP data */ + dev_hold(real_dev); + + for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) + INIT_HLIST_HEAD(&port->muxed_ep[entry]); + + rc = rmnet_descriptor_init(port); + if (rc) { + rmnet_descriptor_deinit(port); + return rc; + } + + rmnet_map_tx_aggregate_init(port); + rmnet_map_cmd_init(port); + + netdev_dbg(real_dev, "registered with rmnet\n"); + return 0; +} + +static void rmnet_unregister_bridge(struct net_device *dev, + struct rmnet_port *port) +{ + struct rmnet_port *bridge_port; + struct net_device *bridge_dev; + + if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) + return; + + /* bridge slave handling */ + if (!port->nr_rmnet_devs) { + bridge_dev = port->bridge_ep; + + bridge_port = rmnet_get_port_rtnl(bridge_dev); + bridge_port->bridge_ep = NULL; + bridge_port->rmnet_mode = RMNET_EPMODE_VND; + } else { + bridge_dev = port->bridge_ep; + + bridge_port = rmnet_get_port_rtnl(bridge_dev); + rmnet_unregister_real_device(bridge_dev, bridge_port); + } +} + +static int rmnet_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION; + struct net_device *real_dev; + int mode = RMNET_EPMODE_VND; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + int err = 0; + u16 mux_id; + + real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + if (!real_dev || !dev) + return -ENODEV; + + if (!data[IFLA_RMNET_MUX_ID]) + return -EINVAL; + + ep = kzalloc(sizeof(*ep), GFP_ATOMIC); + if (!ep) + return -ENOMEM; + + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + + err = rmnet_register_real_device(real_dev); + if (err) + goto err0; + + port = rmnet_get_port_rtnl(real_dev); + err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep); + if (err) + 
goto err1; + + port->rmnet_mode = mode; + + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + + if (data[IFLA_RMNET_FLAGS]) { + struct ifla_rmnet_flags *flags; + + flags = nla_data(data[IFLA_RMNET_FLAGS]); + data_format = flags->flags & flags->mask; + } + + netdev_dbg(dev, "data format [0x%08X]\n", data_format); + port->data_format = data_format; + + if (data[IFLA_RMNET_UL_AGG_PARAMS]) { + void *agg_params; + unsigned long irq_flags; + + agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]); + spin_lock_irqsave(&port->agg_lock, irq_flags); + memcpy(&port->egress_agg_params, agg_params, + sizeof(port->egress_agg_params)); + spin_unlock_irqrestore(&port->agg_lock, irq_flags); + } + + return 0; + +err1: + rmnet_unregister_real_device(real_dev, port); +err0: + kfree(ep); + return err; +} + +static void rmnet_dellink(struct net_device *dev, struct list_head *head) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + u8 mux_id; + + real_dev = priv->real_dev; + + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + return; + + port = rmnet_get_port_rtnl(real_dev); + + mux_id = rmnet_vnd_get_mux(dev); + + ep = rmnet_get_endpoint(port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); + rmnet_unregister_bridge(dev, port); + rmnet_vnd_dellink(mux_id, port, ep); + synchronize_rcu(); + kfree(ep); + } + + if (!port->nr_rmnet_devs) + qmi_rmnet_qmi_exit(port->qmi_info, port); + + rmnet_unregister_real_device(real_dev, port); + + unregister_netdevice_queue(dev, head); +} + +static void rmnet_force_unassociate_device(struct net_device *dev) +{ + struct net_device *real_dev = dev; + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + unsigned long bkt_ep; + LIST_HEAD(list); + + if (!rmnet_is_real_dev_registered(real_dev)) + return; + + ASSERT_RTNL(); + + port = rmnet_get_port_rtnl(dev); + qmi_rmnet_qmi_exit(port->qmi_info, port); + + rmnet_unregister_bridge(dev, port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + unregister_netdevice_queue(ep->egress_dev, &list); + rmnet_vnd_dellink(ep->mux_id, port, ep); + + hlist_del_init_rcu(&ep->hlnode); + synchronize_rcu(); + kfree(ep); + } + + unregister_netdevice_many(&list); + + rmnet_unregister_real_device(real_dev, port); +} + +static int rmnet_config_notify_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct net_device *dev = netdev_notifier_info_to_dev(data); + + if (!dev) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: + netdev_dbg(dev, "Kernel unregister\n"); + rmnet_force_unassociate_device(dev); + break; + + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rmnet_dev_notifier __read_mostly = { + .notifier_call = rmnet_config_notify_cb, +}; + +static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct rmnet_egress_agg_params *agg_params; + u16 mux_id; + + if (!data) { + return -EINVAL; + } else { + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) + return -ERANGE; + } + + if (data[IFLA_RMNET_UL_AGG_PARAMS]) { + agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]); + if (agg_params->agg_time < 3000000) + return -EINVAL; + } + } + + return 0; +} + +static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + 
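/*
 * [Editorial sketch -- not part of this patch] rmnet_rtnl_validate() above
 * rejects an IFLA_RMNET_UL_AGG_PARAMS attribute whose agg_time is below
 * 3000000; the field appears to be interpreted as nanoseconds, making the
 * effective floor 3 ms. A hypothetical payload that would pass the check
 * (field layout taken from struct rmnet_egress_agg_params defined later in
 * this patch; the example values are illustrative only):
 */
static const struct rmnet_egress_agg_params my_agg_example = {
	.agg_size  = 8192,	/* bytes per aggregated MAP frame (example) */
	.agg_count = 10,	/* packets per aggregated frame (example) */
	.agg_time  = 3000000,	/* 3 ms, the minimum accepted by validate() */
};
/* [End of editorial sketch] */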
struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + u16 mux_id; + + real_dev = __dev_get_by_index(dev_net(dev), + nla_get_u32(tb[IFLA_LINK])); + + if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev)) + return -ENODEV; + + port = rmnet_get_port_rtnl(real_dev); + + if (data[IFLA_RMNET_MUX_ID]) { + mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); + ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; + + hlist_del_init_rcu(&ep->hlnode); + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + + ep->mux_id = mux_id; + priv->mux_id = mux_id; + } + + if (data[IFLA_RMNET_FLAGS]) { + struct ifla_rmnet_flags *flags; + + flags = nla_data(data[IFLA_RMNET_FLAGS]); + port->data_format = flags->flags & flags->mask; + } + + if (data[IFLA_RMNET_DFC_QOS]) { + struct tcmsg *tcm; + + tcm = nla_data(data[IFLA_RMNET_DFC_QOS]); + qmi_rmnet_change_link(dev, port, tcm); + } + + if (data[IFLA_RMNET_UL_AGG_PARAMS]) { + void *agg_params; + unsigned long irq_flags; + + agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]); + spin_lock_irqsave(&port->agg_lock, irq_flags); + memcpy(&port->egress_agg_params, agg_params, + sizeof(port->egress_agg_params)); + spin_unlock_irqrestore(&port->agg_lock, irq_flags); + } + + return 0; +} + +static size_t rmnet_get_size(const struct net_device *dev) +{ + return + /* IFLA_RMNET_MUX_ID */ + nla_total_size(2) + + /* IFLA_RMNET_FLAGS */ + nla_total_size(sizeof(struct ifla_rmnet_flags)) + + /* IFLA_RMNET_DFC_QOS */ + nla_total_size(sizeof(struct tcmsg)) + + /* IFLA_RMNET_UL_AGG_PARAMS */ + nla_total_size(sizeof(struct rmnet_egress_agg_params)); +} + +static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct net_device *real_dev; + struct ifla_rmnet_flags f; + struct rmnet_port *port = NULL; + + real_dev = priv->real_dev; + + if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id)) + goto nla_put_failure; + + if (rmnet_is_real_dev_registered(real_dev)) { + port = rmnet_get_port_rtnl(real_dev); + f.flags = port->data_format; + } else { + f.flags = 0; + } + + f.mask = ~0; + + if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f)) + goto nla_put_failure; + + if (port) { + if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS, + sizeof(port->egress_agg_params), + &port->egress_agg_params)) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +struct rtnl_link_ops rmnet_link_ops __read_mostly = { + .kind = "rmnet", + .maxtype = __IFLA_RMNET_EXT_MAX, + .priv_size = sizeof(struct rmnet_priv), + .setup = rmnet_vnd_setup, + .validate = rmnet_rtnl_validate, + .newlink = rmnet_newlink, + .dellink = rmnet_dellink, + .get_size = rmnet_get_size, + .changelink = rmnet_changelink, + .policy = rmnet_policy, + .fill_info = rmnet_fill_info, +}; + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} +EXPORT_SYMBOL(rmnet_get_port); + +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + struct rmnet_endpoint *ep; + + hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) { + if (ep->mux_id == mux_id) + return ep; + } + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_endpoint); + +int rmnet_add_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev, + struct 
netlink_ext_ack *extack) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct net_device *real_dev = priv->real_dev; + struct rmnet_port *port, *slave_port; + int err; + + port = rmnet_get_port(real_dev); + + /* If there is more than one rmnet dev attached, its probably being + * used for muxing. Skip the briding in that case + */ + if (port->nr_rmnet_devs > 1) + return -EINVAL; + + if (rmnet_is_real_dev_registered(slave_dev)) + return -EBUSY; + + err = rmnet_register_real_device(slave_dev); + if (err) + return -EBUSY; + + slave_port = rmnet_get_port(slave_dev); + slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; + slave_port->bridge_ep = real_dev; + + port->rmnet_mode = RMNET_EPMODE_BRIDGE; + port->bridge_ep = slave_dev; + + netdev_dbg(slave_dev, "registered with rmnet as slave\n"); + return 0; +} + +int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct net_device *real_dev = priv->real_dev; + struct rmnet_port *port, *slave_port; + + port = rmnet_get_port(real_dev); + port->rmnet_mode = RMNET_EPMODE_VND; + port->bridge_ep = NULL; + + slave_port = rmnet_get_port(slave_dev); + rmnet_unregister_real_device(slave_dev, slave_port); + + netdev_dbg(slave_dev, "removed from rmnet as slave\n"); + return 0; +} + +void *rmnet_get_qmi_pt(void *port) +{ + if (port) + return ((struct rmnet_port *)port)->qmi_info; + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_qmi_pt); + +void *rmnet_get_qos_pt(struct net_device *dev) +{ + if (dev) + return rcu_dereference( + ((struct rmnet_priv *)netdev_priv(dev))->qos_info); + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_qos_pt); + +void *rmnet_get_rmnet_port(struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (dev) { + priv = netdev_priv(dev); + return (void *)rmnet_get_port(priv->real_dev); + } + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_rmnet_port); + +struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id) +{ + struct rmnet_endpoint *ep; + + if (port) { + ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id); + if (ep) + return ep->egress_dev; + } + + return NULL; +} +EXPORT_SYMBOL(rmnet_get_rmnet_dev); + +void rmnet_reset_qmi_pt(void *port) +{ + if (port) + ((struct rmnet_port *)port)->qmi_info = NULL; +} +EXPORT_SYMBOL(rmnet_reset_qmi_pt); + +void rmnet_init_qmi_pt(void *port, void *qmi) +{ + if (port) + ((struct rmnet_port *)port)->qmi_info = qmi; +} +EXPORT_SYMBOL(rmnet_init_qmi_pt); + +void rmnet_get_packets(void *port, u64 *rx, u64 *tx) +{ + struct rmnet_priv *priv; + struct rmnet_pcpu_stats *ps; + unsigned int cpu, start; + + struct rmnet_endpoint *ep; + unsigned long bkt; + + if (!port || !tx || !rx) + return; + + *tx = 0; + *rx = 0; + rcu_read_lock(); + hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) { + priv = netdev_priv(ep->egress_dev); + for_each_possible_cpu(cpu) { + ps = per_cpu_ptr(priv->pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&ps->syncp); + *tx += ps->stats.tx_pkts; + *rx += ps->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&ps->syncp, start)); + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(rmnet_get_packets); + +void rmnet_set_powersave_format(void *port) +{ + if (!port) + return; + ((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS; +} +EXPORT_SYMBOL(rmnet_set_powersave_format); + +void rmnet_clear_powersave_format(void *port) +{ + if (!port) + return; + ((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS; +} 
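/*
 * [Editorial sketch -- not part of this patch] rmnet_get_packets() above sums
 * per-CPU tx/rx counters inside a u64_stats_sync retry loop so that 64-bit
 * counters are read consistently even on 32-bit machines. The core of that
 * read pattern, with hypothetical my_* names:
 */
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64 pkts;
	struct u64_stats_sync syncp;
};

static u64 my_sum_pkts(struct my_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct my_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 val;

		/* Retry if a writer updated the counters mid-read */
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			val = s->pkts;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += val;
	}

	return total;
}
/* [End of editorial sketch] */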
+EXPORT_SYMBOL(rmnet_clear_powersave_format); + +void rmnet_enable_all_flows(void *port) +{ + struct rmnet_endpoint *ep; + unsigned long bkt; + + if (unlikely(!port)) + return; + + rcu_read_lock(); + hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep, + bkt, ep, hlnode) { + qmi_rmnet_enable_all_flows(ep->egress_dev); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(rmnet_enable_all_flows); + +bool rmnet_all_flows_enabled(void *port) +{ + struct rmnet_endpoint *ep; + unsigned long bkt; + bool ret = true; + + if (unlikely(!port)) + return true; + + rcu_read_lock(); + hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep, + bkt, ep, hlnode) { + if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) { + ret = false; + goto out; + } + } +out: + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(rmnet_all_flows_enabled); + +int rmnet_get_powersave_notif(void *port) +{ + if (!port) + return 0; + return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF; +} +EXPORT_SYMBOL(rmnet_get_powersave_notif); + +/* Startup/Shutdown */ + +static int __init rmnet_init(void) +{ + int rc; + + WARN_ON(1); + rc = register_netdevice_notifier(&rmnet_dev_notifier); + if (rc != 0) + return rc; + + rc = rtnl_link_register(&rmnet_link_ops); + if (rc != 0) { + unregister_netdevice_notifier(&rmnet_dev_notifier); + return rc; + } + return rc; +} + +static void __exit rmnet_exit(void) +{ + unregister_netdevice_notifier(&rmnet_dev_notifier); + rtnl_link_unregister(&rmnet_link_ops); +} + +module_init(rmnet_init) +module_exit(rmnet_exit) +MODULE_LICENSE("GPL v2"); diff --git a/core/rmnet_config.h b/core/rmnet_config.h new file mode 100644 index 0000000000..f2f20a9134 --- /dev/null +++ b/core/rmnet_config.h @@ -0,0 +1,179 @@ +/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data configuration engine + * + */ + +#include +#include + +#ifndef _RMNET_CONFIG_H_ +#define _RMNET_CONFIG_H_ + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAX_VEID 4 + +struct rmnet_endpoint { + u8 mux_id; + struct net_device *egress_dev; + struct hlist_node hlnode; +}; + +struct rmnet_port_priv_stats { + u64 dl_hdr_last_qmap_vers; + u64 dl_hdr_last_ep_id; + u64 dl_hdr_last_trans_id; + u64 dl_hdr_last_seq; + u64 dl_hdr_last_bytes; + u64 dl_hdr_last_pkts; + u64 dl_hdr_last_flows; + u64 dl_hdr_count; + u64 dl_hdr_total_bytes; + u64 dl_hdr_total_pkts; + u64 dl_trl_last_seq; + u64 dl_trl_count; +}; + +struct rmnet_egress_agg_params { + u16 agg_size; + u16 agg_count; + u32 agg_time; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. 
+ */ +struct rmnet_port { + struct net_device *dev; + u32 data_format; + u8 nr_rmnet_devs; + u8 rmnet_mode; + struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; + struct net_device *bridge_ep; + void *rmnet_perf; + + struct rmnet_egress_agg_params egress_agg_params; + + /* Protect aggregation related elements */ + spinlock_t agg_lock; + + struct sk_buff *agg_skb; + int agg_state; + u8 agg_count; + struct timespec64 agg_time; + struct timespec64 agg_last; + struct hrtimer hrtimer; + struct work_struct agg_wq; + + void *qmi_info; + + /* dl marker elements */ + struct list_head dl_list; + struct rmnet_port_priv_stats stats; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; +}; + +extern struct rtnl_link_ops rmnet_link_ops; + +struct rmnet_vnd_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 tx_pkts; + u64 tx_bytes; + u32 tx_drops; +}; + +struct rmnet_pcpu_stats { + struct rmnet_vnd_stats stats; + struct u64_stats_sync syncp; +}; + +struct rmnet_coal_close_stats { + u64 non_coal; + u64 ip_miss; + u64 trans_miss; + u64 hw_nl; + u64 hw_pkt; + u64 hw_byte; + u64 hw_time; + u64 hw_evict; + u64 coal; +}; + +struct rmnet_coal_stats { + u64 coal_rx; + u64 coal_pkts; + u64 coal_hdr_nlo_err; + u64 coal_hdr_pkt_err; + u64 coal_csum_err; + u64 coal_reconstruct; + u64 coal_ip_invalid; + u64 coal_trans_invalid; + struct rmnet_coal_close_stats close; + u64 coal_veid[RMNET_MAX_VEID]; +}; + +struct rmnet_priv_stats { + u64 csum_ok; + u64 csum_valid_unset; + u64 csum_validation_failed; + u64 csum_err_bad_buffer; + u64 csum_err_invalid_ip_version; + u64 csum_err_invalid_transport; + u64 csum_fragmented_pkt; + u64 csum_skipped; + u64 csum_sw; + u64 csum_hw; + struct rmnet_coal_stats coal; +}; + +struct rmnet_priv { + u8 mux_id; + struct net_device *real_dev; + struct rmnet_pcpu_stats __percpu *pcpu_stats; + struct gro_cells gro_cells; + struct rmnet_priv_stats stats; + void __rcu *qos_info; +}; + +enum rmnet_dl_marker_prio { + RMNET_PERF, + RMNET_SHS, +}; + +enum rmnet_trace_func { + RMNET_MODULE, + NW_STACK_MODULE, +}; + +enum rmnet_trace_evt { + RMNET_DLVR_SKB, + RMNET_RCV_FROM_PND, + RMNET_TX_UL_PKT, + NW_STACK_DEV_Q_XMIT, + NW_STACK_NAPI_GRO_FLUSH, + NW_STACK_RX, + NW_STACK_TX, +}; + +int rmnet_is_real_dev_registered(const struct net_device *real_dev); +struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); +int rmnet_add_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev, + struct netlink_ext_ack *extack); +int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev); +#endif /* _RMNET_CONFIG_H_ */ diff --git a/core/rmnet_descriptor.c b/core/rmnet_descriptor.c new file mode 100644 index 0000000000..ddc9f12fa6 --- /dev/null +++ b/core/rmnet_descriptor.c @@ -0,0 +1,1087 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Packet Descriptor Framework + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_descriptor.h" +#include "rmnet_handlers.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" +#include "rmnet_qmi.h" +#include "qmi_rmnet.h" + +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +typedef void (*rmnet_perf_chain_hook_t)(void); + +struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} +EXPORT_SYMBOL(rmnet_get_frag_descriptor); + +void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + + list_del(&frag_desc->list); + put_page(skb_frag_page(&frag_desc->frag)); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} +EXPORT_SYMBOL(rmnet_recycle_frag_descriptor); + +void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} +EXPORT_SYMBOL(rmnet_descriptor_add_frag); + +int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc, + int start, u8 *nexthdrp, __be16 *fragp) +{ + u8 nexthdr = *nexthdrp; + + *fragp = 0; + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr *hp; + int hdrlen; + + if (nexthdr == NEXTHDR_NONE) + return -EINVAL; + + hp = rmnet_frag_data_ptr(frag_desc) + start; + + if (nexthdr == NEXTHDR_FRAGMENT) { + __be16 *fp; + + fp = rmnet_frag_data_ptr(frag_desc) + start + + offsetof(struct frag_hdr, frag_off); + *fragp = *fp; + if (ntohs(*fragp) & ~0x7) + break; + hdrlen = 8; + } else if (nexthdr == NEXTHDR_AUTH) { + hdrlen = (hp->hdrlen + 2) << 2; + } else { + hdrlen = ipv6_optlen(hp); + } + + nexthdr = hp->nexthdr; + start += hdrlen; + } + + *nexthdrp = nexthdr; + return start; +} +EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr); + +static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap, + struct rmnet_port *port, + int enable) +{ + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + mux_id 
= qmap->mux_id; + cmd = (struct rmnet_map_control_command *) + ((char *)qmap + sizeof(*qmap)); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + return RX_HANDLER_CONSUMED; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + return RX_HANDLER_CONSUMED; + + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) + return RMNET_MAP_COMMAND_UNSUPPORTED; + else + return RMNET_MAP_COMMAND_ACK; +} + +static void rmnet_frag_send_ack(struct rmnet_map_header *qmap, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = port->dev; + struct sk_buff *skb; + u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap); + + skb = alloc_skb(alloc_len, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = htons(ETH_P_MAP); + skb->dev = dev; + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + +static void +rmnet_frag_process_flow_start(struct rmnet_map_control_command_header *cmd, + struct rmnet_port *port, + u16 cmd_len) +{ + struct rmnet_map_dl_ind_hdr *dlhdr; + u32 data_format; + bool is_dl_mark_v2; + + if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_HDR_SIZE) + return; + + data_format = port->data_format; + is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2; + dlhdr = (struct rmnet_map_dl_ind_hdr *)((char *)cmd + sizeof(*cmd)); + + port->stats.dl_hdr_last_ep_id = cmd->source_id; + port->stats.dl_hdr_last_qmap_vers = cmd->reserved; + port->stats.dl_hdr_last_trans_id = cmd->transaction_id; + port->stats.dl_hdr_last_seq = dlhdr->le.seq; + port->stats.dl_hdr_last_bytes = dlhdr->le.bytes; + port->stats.dl_hdr_last_pkts = dlhdr->le.pkts; + port->stats.dl_hdr_last_flows = dlhdr->le.flows; + port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes; + port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts; + port->stats.dl_hdr_count++; + + /* If a target is taking frag path, we can assume DL marker v2 is in + * play + */ + if (is_dl_mark_v2) + rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd); + else + rmnet_map_dl_hdr_notify(port, dlhdr); +} + +static void +rmnet_frag_process_flow_end(struct rmnet_map_control_command_header *cmd, + struct rmnet_port *port, u16 cmd_len) +{ + struct rmnet_map_dl_ind_trl *dltrl; + u32 data_format; + bool is_dl_mark_v2; + + + if (cmd_len + sizeof(struct rmnet_map_header) < RMNET_DL_IND_TRL_SIZE) + return; + + data_format = port->data_format; + is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2; + dltrl = (struct rmnet_map_dl_ind_trl *)((char *)cmd + sizeof(*cmd)); + + port->stats.dl_trl_last_seq = dltrl->seq_le; + port->stats.dl_trl_count++; + + /* If a target is taking frag path, we can assume DL marker v2 is in + * play + */ + if (is_dl_mark_v2) + rmnet_map_dl_trl_notify_v2(port, dltrl, cmd); + else + rmnet_map_dl_trl_notify(port, dltrl); +} + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. 
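+ * Flow enable/disable commands are handled here; an ACK is transmitted back
+ * on the real device only when the flow control handler reports success.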
+ */ +void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = (struct rmnet_map_control_command *) + ((char *)qmap + sizeof(*qmap)); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_frag_send_ack(qmap, rc, port); +} + +int rmnet_frag_flow_command(struct rmnet_map_header *qmap, + struct rmnet_port *port, u16 pkt_len) +{ + struct rmnet_map_control_command_header *cmd; + unsigned char command_name; + + cmd = (struct rmnet_map_control_command_header *) + ((char *)qmap + sizeof(*qmap)); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_START: + rmnet_frag_process_flow_start(cmd, port, pkt_len); + break; + + case RMNET_MAP_COMMAND_FLOW_END: + rmnet_frag_process_flow_end(cmd, port, pkt_len); + break; + + default: + return 1; + } + + return 0; +} +EXPORT_SYMBOL(rmnet_frag_flow_command); + +void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->bv_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + +/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack + * if needed (i.e. 
forwarding, UDP GRO) + */ +static void rmnet_frag_gso_stamp(struct sk_buff *skb, + struct rmnet_frag_descriptor *frag_desc) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct iphdr *iph = (struct iphdr *)skb->data; + __sum16 pseudo; + u16 pkt_len = skb->len - frag_desc->ip_len; + bool ipv4 = frag_desc->ip_proto == 4; + + if (ipv4) { + iph->tot_len = htons(skb->len); + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + pkt_len, frag_desc->trans_proto, + 0); + } else { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + + /* Payload length includes any extension headers */ + ip6h->payload_len = htons(skb->len - sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + pkt_len, frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + struct tcphdr *tp = (struct tcphdr *) + ((u8 *)iph + frag_desc->ip_len); + + tp->check = pseudo; + shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + skb->csum_offset = offsetof(struct tcphdr, check); + } else { + struct udphdr *up = (struct udphdr *) + ((u8 *)iph + frag_desc->ip_len); + + up->len = htons(pkt_len); + up->check = pseudo; + shinfo->gso_type = SKB_GSO_UDP_L4; + skb->csum_offset = offsetof(struct udphdr, check); + } + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head; + shinfo->gso_size = frag_desc->gso_size; + shinfo->gso_segs = frag_desc->gso_segs; +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) + rmnet_frag_pull(frag_desc, port, hdr_len); + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.bv_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->bv_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid) + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + if (frag_desc->gso_segs > 1) + rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} +EXPORT_SYMBOL(rmnet_frag_deliver); + +static void 
__rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc, + struct rmnet_port *port, + struct list_head *list, + u8 pkt_id) +{ + struct rmnet_priv *priv = netdev_priv(coal_desc->dev); + struct rmnet_frag_descriptor *new_frag; + u8 *hdr_start = rmnet_frag_data_ptr(coal_desc); + u32 offset; + + new_frag = rmnet_get_frag_descriptor(port); + if (!new_frag) + return; + + /* Account for header lengths to access the data start */ + offset = coal_desc->frag.bv_offset + coal_desc->ip_len + + coal_desc->trans_len + coal_desc->data_offset; + + /* Header information and most metadata is the same as the original */ + memcpy(new_frag, coal_desc, sizeof(*coal_desc)); + INIT_LIST_HEAD(&new_frag->list); + INIT_LIST_HEAD(&new_frag->sub_frags); + rmnet_frag_fill(new_frag, skb_frag_page(&coal_desc->frag), offset, + coal_desc->gso_size * coal_desc->gso_segs); + + if (coal_desc->trans_proto == IPPROTO_TCP) { + struct tcphdr *th; + + th = (struct tcphdr *)(hdr_start + coal_desc->ip_len); + new_frag->tcp_seq_set = 1; + new_frag->tcp_seq = htonl(ntohl(th->seq) + + coal_desc->data_offset); + } + + if (coal_desc->ip_proto == 4) { + struct iphdr *iph; + + iph = (struct iphdr *)hdr_start; + new_frag->ip_id_set = 1; + new_frag->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id); + } + + new_frag->hdr_ptr = hdr_start; + new_frag->csum_valid = true; + priv->stats.coal.coal_reconstruct++; + + /* Update meta information to move past the data we just segmented */ + coal_desc->data_offset += coal_desc->gso_size * coal_desc->gso_segs; + coal_desc->pkt_id = pkt_id + 1; + coal_desc->gso_segs = 0; + + list_add_tail(&new_frag->list, list); +} + +/* Converts the coalesced frame into a list of descriptors. + * NLOs containing csum erros will not be included. + */ +static void +rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc, + u64 nlo_err_mask, struct rmnet_port *port, + struct list_head *list) +{ + struct iphdr *iph; + struct rmnet_priv *priv = netdev_priv(coal_desc->dev); + struct rmnet_map_v5_coal_header *coal_hdr; + u16 pkt_len; + u8 pkt, total_pkt = 0; + u8 nlo; + bool gro = coal_desc->dev->features & NETIF_F_GRO_HW; + + /* Pull off the headers we no longer need */ + rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header)); + coal_hdr = (struct rmnet_map_v5_coal_header *) + rmnet_frag_data_ptr(coal_desc); + rmnet_frag_pull(coal_desc, port, sizeof(*coal_hdr)); + + iph = (struct iphdr *)rmnet_frag_data_ptr(coal_desc); + + if (iph->version == 4) { + coal_desc->ip_proto = 4; + coal_desc->ip_len = iph->ihl * 4; + coal_desc->trans_proto = iph->protocol; + + /* Don't allow coalescing of any packets with IP options */ + if (iph->ihl != 5) + gro = false; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + __be16 frag_off; + u8 protocol = ip6h->nexthdr; + + coal_desc->ip_proto = 6; + coal_desc->ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc, + sizeof(*ip6h), + &protocol, + &frag_off); + coal_desc->trans_proto = protocol; + + /* If we run into a problem, or this has a fragment header + * (which should technically not be possible, if the HW + * works as intended...), bail. + */ + if (coal_desc->ip_len < 0 || frag_off) { + priv->stats.coal.coal_ip_invalid++; + return; + } else if (coal_desc->ip_len > sizeof(*ip6h)) { + /* Don't allow coalescing of any packets with IPv6 + * extension headers. 
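+ * Clearing gro below causes each packet in the frame to be segmented
+ * out individually instead of being passed up as one GSO frame.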
+ */ + gro = false; + } + } else { + priv->stats.coal.coal_ip_invalid++; + return; + } + + if (coal_desc->trans_proto == IPPROTO_TCP) { + struct tcphdr *th; + + th = (struct tcphdr *)((u8 *)iph + coal_desc->ip_len); + coal_desc->trans_len = th->doff * 4; + } else if (coal_desc->trans_proto == IPPROTO_UDP) { + coal_desc->trans_len = sizeof(struct udphdr); + } else { + priv->stats.coal.coal_trans_invalid++; + return; + } + + coal_desc->hdrs_valid = 1; + for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) { + pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len); + pkt_len -= coal_desc->ip_len + coal_desc->trans_len; + coal_desc->gso_size = pkt_len; + for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets; + pkt++, total_pkt++) { + nlo_err_mask <<= 1; + if (nlo_err_mask & (1ULL << 63)) { + priv->stats.coal.coal_csum_err++; + + /* Segment out the good data */ + if (gro && coal_desc->gso_segs) + __rmnet_frag_segment_data(coal_desc, + port, + list, + total_pkt); + + /* skip over bad packet */ + coal_desc->data_offset += pkt_len; + coal_desc->pkt_id = total_pkt + 1; + } else { + coal_desc->gso_segs++; + + /* Segment the packet if we aren't sending the + * larger packet up the stack. + */ + if (!gro) + __rmnet_frag_segment_data(coal_desc, + port, + list, + total_pkt); + } + } + + /* If we're switching NLOs, we need to send out everything from + * the previous one, if we haven't done so. NLOs only switch + * when the packet length changes. + */ + if (gro && coal_desc->gso_segs) { + /* Fast forward the (hopefully) common case. + * Frames with only one NLO (i.e. one packet length) and + * no checksum errors don't need to be segmented here. + * We can just pass off the original skb. + */ + if (coal_desc->gso_size * coal_desc->gso_segs == + skb_frag_size(&coal_desc->frag) - + coal_desc->ip_len - coal_desc->trans_len) { + coal_desc->hdr_ptr = + rmnet_frag_data_ptr(coal_desc); + coal_desc->csum_valid = true; + list_add_tail(&coal_desc->list, list); + return; + } + + __rmnet_frag_segment_data(coal_desc, port, list, + total_pkt); + } + } +} + +/* Record reason for coalescing pipe closure */ +static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type, + u8 code) +{ + struct rmnet_coal_close_stats *stats = &priv->stats.coal.close; + + switch (type) { + case RMNET_MAP_COAL_CLOSE_NON_COAL: + stats->non_coal++; + break; + case RMNET_MAP_COAL_CLOSE_IP_MISS: + stats->ip_miss++; + break; + case RMNET_MAP_COAL_CLOSE_TRANS_MISS: + stats->trans_miss++; + break; + case RMNET_MAP_COAL_CLOSE_HW: + switch (code) { + case RMNET_MAP_COAL_CLOSE_HW_NL: + stats->hw_nl++; + break; + case RMNET_MAP_COAL_CLOSE_HW_PKT: + stats->hw_pkt++; + break; + case RMNET_MAP_COAL_CLOSE_HW_BYTE: + stats->hw_byte++; + break; + case RMNET_MAP_COAL_CLOSE_HW_TIME: + stats->hw_time++; + break; + case RMNET_MAP_COAL_CLOSE_HW_EVICT: + stats->hw_evict++; + break; + default: + break; + } + break; + case RMNET_MAP_COAL_CLOSE_COAL: + stats->coal++; + break; + default: + break; + } +} + +/* Check if the coalesced header has any incorrect values, in which case, the + * entire coalesced frame must be dropped. 
Then check if there are any + * checksum issues + */ +static int +rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc, + u64 *nlo_err_mask) +{ + struct rmnet_map_v5_coal_header *coal_hdr; + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + struct rmnet_priv *priv = netdev_priv(frag_desc->dev); + u64 mask = 0; + int i; + u8 veid, pkts = 0; + + coal_hdr = (struct rmnet_map_v5_coal_header *) + (data + sizeof(struct rmnet_map_header)); + veid = coal_hdr->virtual_channel_id; + + if (coal_hdr->num_nlos == 0 || + coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) { + priv->stats.coal.coal_hdr_nlo_err++; + return -EINVAL; + } + + for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) { + /* If there is a checksum issue, we need to split + * up the skb. Rebuild the full csum error field + */ + u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap; + u8 pkt = coal_hdr->nl_pairs[i].num_packets; + + mask |= ((u64)err) << (7 - i) * 8; + + /* Track total packets in frame */ + pkts += pkt; + if (pkts > RMNET_MAP_V5_MAX_PACKETS) { + priv->stats.coal.coal_hdr_pkt_err++; + return -EINVAL; + } + } + + /* Track number of packets we get inside of coalesced frames */ + priv->stats.coal.coal_pkts += pkts; + + /* Update ethtool stats */ + rmnet_frag_data_log_close_stats(priv, + coal_hdr->close_type, + coal_hdr->close_value); + if (veid < RMNET_MAX_VEID) + priv->stats.coal.coal_veid[veid]++; + + *nlo_err_mask = mask; + + return 0; +} + +/* Process a QMAPv5 packet header */ +int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + struct rmnet_priv *priv = netdev_priv(frag_desc->dev); + u64 nlo_err_mask; + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + priv->stats.coal.coal_rx++; + rc = rmnet_frag_data_check_coal_header(frag_desc, + &nlo_err_mask); + if (rc) + return rc; + + rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port, + list); + if (list_first_entry(list, struct rmnet_frag_descriptor, + list) != frag_desc) + rmnet_recycle_frag_descriptor(frag_desc, port); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + priv->stats.csum_ok++; + frag_desc->csum_valid = true; + } else { + priv->stats.csum_valid_unset++; + } + + rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header)); + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
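+ * The trim below restores the frame to the payload length derived from
+ * the MAP header (pkt_len minus padding), dropping trailing pad bytes.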
+ */ + rmnet_frag_trim(frag_desc, port, len); + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +/* Perf hook handler */ +rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly; +EXPORT_SYMBOL(rmnet_perf_desc_entry); + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + rmnet_perf_desc_hook_t rmnet_perf_ingress; + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + qmi_rmnet_set_dl_msg_active(port); + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + rmnet_frag_flow_command(qmap, port, len); + goto recycle; + } + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) + rmnet_frag_command(qmap, port); + + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->egress_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + rmnet_frag_pull(frag_desc, port, sizeof(*qmap)); + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + rmnet_frag_trim(frag_desc, port, len); + list_add_tail(&frag_desc->list, &segs); + } + + if (port->data_format & RMNET_INGRESS_FORMAT_PS) + qmi_rmnet_work_maybe_restart(port); + + rcu_read_lock(); + rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry); + if (rmnet_perf_ingress) { + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_perf_ingress(frag, port); + } + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Notify perf at the end of SKB chain */ +rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly; +EXPORT_SYMBOL(rmnet_perf_chain_end); + +void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end; + LIST_HEAD(desc_list); + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, &desc_list, + list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, port); + } + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } + + rcu_read_lock(); + rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end); + if (rmnet_perf_opt_chain_end) + rmnet_perf_opt_chain_end(); + rcu_read_unlock(); +} + +void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool 
= port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} diff --git a/core/rmnet_descriptor.h b/core/rmnet_descriptor.h new file mode 100644 index 0000000000..996a6caa1d --- /dev/null +++ b/core/rmnet_descriptor.h @@ -0,0 +1,147 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Packet Descriptor Framework + * + */ + +#ifndef _RMNET_DESCRIPTOR_H_ +#define _RMNET_DESCRIPTOR_H_ + +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_map.h" + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +/* Descriptor management */ +struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port); +void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len); +int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc, + int start, u8 *nexthdrp, __be16 *fragp); + +/* QMAP command packets */ +void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port); +int rmnet_frag_flow_command(struct rmnet_map_header *qmap, + struct rmnet_port *port, u16 pkt_len); + +/* Ingress data handlers */ +void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list); +void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port); +int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len); +void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port); + +int rmnet_descriptor_init(struct rmnet_port *port); +void rmnet_descriptor_deinit(struct rmnet_port *port); + +static inline void *rmnet_frag_data_ptr(struct 
rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.bv_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.bv_offset = page_offset; +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +#endif /* _RMNET_DESCRIPTOR_H_ */ diff --git a/core/rmnet_handlers.c b/core/rmnet_handlers.c new file mode 100644 index 0000000000..a4e5d36488 --- /dev/null +++ b/core/rmnet_handlers.c @@ -0,0 +1,448 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data ingress/egress handler + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "rmnet_private.h" +#include "rmnet_config.h" +#include "rmnet_vnd.h" +#include "rmnet_map.h" +#include "rmnet_handlers.h" +#include "rmnet_descriptor.h" + +#include "rmnet_qmi.h" +#include "qmi_rmnet.h" + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 +#define CREATE_TRACE_POINTS +#include "rmnet_trace.h" + +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_low); +EXPORT_TRACEPOINT_SYMBOL(rmnet_high); +EXPORT_TRACEPOINT_SYMBOL(rmnet_err); +EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update); +EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset); +EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost); + +/* Helper Functions */ + +void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } +} +EXPORT_SYMBOL(rmnet_set_skb_proto); + +/* Shs hook handler */ +int (*rmnet_shs_skb_entry)(struct sk_buff *skb, + struct rmnet_port *port) __rcu __read_mostly; +EXPORT_SYMBOL(rmnet_shs_skb_entry); + +/* Shs hook handler for work queue*/ +int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb, + struct rmnet_port *port) __rcu __read_mostly; +EXPORT_SYMBOL(rmnet_shs_skb_entry_wq); + +/* Generic handler */ + +void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port); + + trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF, + 0xDEF, 0xDEF, (void *)skb, NULL); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + rmnet_vnd_rx_fixup(skb->dev, skb->len); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + rcu_read_lock(); + rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry); + if (rmnet_shs_stamp) { + rmnet_shs_stamp(skb, port); + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + netif_receive_skb(skb); +} +EXPORT_SYMBOL(rmnet_deliver_skb); + +/* Important to note, port cannot be used here if it has gone stale */ +void +rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port, + enum rmnet_packet_context ctx) +{ + int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port); + struct rmnet_priv *priv = netdev_priv(skb->dev); + + trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF, + 0xDEF, 0xDEF, (void *)skb, NULL); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + rmnet_vnd_rx_fixup(skb->dev, skb->len); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + /* packets coming from work queue context due to packet flush timer + * must go through the special workqueue path in SHS driver + */ + rcu_read_lock(); + rmnet_shs_stamp = (!ctx) ? 
rcu_dereference(rmnet_shs_skb_entry) : + rcu_dereference(rmnet_shs_skb_entry_wq); + if (rmnet_shs_stamp) { + rmnet_shs_stamp(skb, port); + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + if (ctx == RMNET_NET_RX_CTX) + netif_receive_skb(skb); + else + gro_cells_receive(&priv->gro_cells, skb); +} +EXPORT_SYMBOL(rmnet_deliver_skb_wq); + +/* Deliver a list of skbs after undoing coalescing */ +static void rmnet_deliver_skb_list(struct sk_buff_head *head, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(head))) { + rmnet_set_skb_proto(skb); + rmnet_deliver_skb(skb, port); + } +} + +/* MAP handler */ + +static void +__rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct sk_buff_head list; + u16 len, pad; + u8 mux_id; + + /* We don't need the spinlock since only we touch this */ + __skb_queue_head_init(&list); + + qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb); + if (qmap->cd_bit) { + qmi_rmnet_set_dl_msg_active(port); + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + if (!rmnet_map_flow_command(skb, port, false)) + return; + } + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) + return rmnet_map_command(skb, port); + + goto free_skb; + } + + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto free_skb; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto free_skb; + + skb->dev = ep->egress_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_map_process_next_hdr_packet(skb, &list, len)) + goto free_skb; + } else { + /* We only have the main QMAP header to worry about */ + pskb_pull(skb, sizeof(*qmap)); + + rmnet_set_skb_proto(skb); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + if (!rmnet_map_checksum_downlink_packet(skb, len + pad)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + pskb_trim(skb, len); + + /* Push the single packet onto the list */ + __skb_queue_tail(&list, skb); + } + + if (port->data_format & RMNET_INGRESS_FORMAT_PS) + qmi_rmnet_work_maybe_restart(port); + + rmnet_deliver_skb_list(&list, port); + return; + +free_skb: + kfree_skb(skb); +} + +int (*rmnet_perf_deag_entry)(struct sk_buff *skb, + struct rmnet_port *port) __rcu __read_mostly; +EXPORT_SYMBOL(rmnet_perf_deag_entry); + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct sk_buff *skbn; + int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb, + struct rmnet_port *port); + + if (skb->dev->type == ARPHRD_ETHER) { + if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) { + kfree_skb(skb); + return; + } + + skb_push(skb, ETH_HLEN); + } + + if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + /* No aggregation. 
Pass the frame on as is */ + if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) { + __rmnet_map_ingress_handler(skb, port); + return; + } + + /* Pass off handling to rmnet_perf module, if present */ + rcu_read_lock(); + rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry); + if (rmnet_perf_core_deaggregate) { + rmnet_perf_core_deaggregate(skb, port); + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) { + __rmnet_map_ingress_handler(skbn, port); + + if (skbn == skb) + goto next_skb; + } + + consume_skb(skb); +next_skb: + skb = skb_frag; + } +} + +static int rmnet_map_egress_handler(struct sk_buff *skb, + struct rmnet_port *port, u8 mux_id, + struct net_device *orig_dev) +{ + int required_headroom, additional_header_len, csum_type; + struct rmnet_map_header *map_header; + + additional_header_len = 0; + required_headroom = sizeof(struct rmnet_map_header); + csum_type = 0; + + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { + additional_header_len = sizeof(struct rmnet_map_v5_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5; + } + + required_headroom += additional_header_len; + + if (skb_headroom(skb) < required_headroom) { + if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) + return -ENOMEM; + } + + if (csum_type) + rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type); + + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0, + port); + if (!map_header) + return -ENOMEM; + + map_header->mux_id = mux_id; + + if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { + int non_linear_skb; + + if (rmnet_map_tx_agg_skip(skb, required_headroom)) + goto done; + + non_linear_skb = (orig_dev->features & NETIF_F_GSO) && + skb_is_nonlinear(skb); + + if (non_linear_skb) { + if (unlikely(__skb_linearize(skb))) + goto done; + } + + rmnet_map_tx_aggregate(skb, port); + return -EINPROGRESS; + } + +done: + skb->protocol = htons(ETH_P_MAP); + return 0; +} + +static void +rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) +{ + if (bridge_dev) { + skb->dev = bridge_dev; + dev_queue_xmit(skb); + } +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. 
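+ * In bridge mode, MAP processing is skipped and the packet is forwarded
+ * as-is to the configured bridge endpoint device.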
+ */ +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF, + 0xDEF, 0xDEF, 0xDEF, NULL, NULL); + dev = skb->dev; + port = rmnet_get_port(dev); + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} +EXPORT_SYMBOL(rmnet_rx_handler); + +/* Modifies packet as per logical endpoint configuration and egress data format + * for egress device configured in logical endpoint. Packet is then transmitted + * on the egress device. + */ +void rmnet_egress_handler(struct sk_buff *skb) +{ + struct net_device *orig_dev; + struct rmnet_port *port; + struct rmnet_priv *priv; + u8 mux_id; + int err; + u32 skb_len; + + trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF, + 0xDEF, (void *)skb, NULL); + sk_pacing_shift_update(skb->sk, 8); + + orig_dev = skb->dev; + priv = netdev_priv(orig_dev); + skb->dev = priv->real_dev; + mux_id = priv->mux_id; + + port = rmnet_get_port(skb->dev); + if (!port) + goto drop; + + skb_len = skb->len; + err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev); + if (err == -ENOMEM) + goto drop; + else if (err == -EINPROGRESS) { + rmnet_vnd_tx_fixup(orig_dev, skb_len); + return; + } + + rmnet_vnd_tx_fixup(orig_dev, skb_len); + + dev_queue_xmit(skb); + return; + +drop: + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); +} diff --git a/core/rmnet_handlers.h b/core/rmnet_handlers.h new file mode 100644 index 0000000000..f821b764a9 --- /dev/null +++ b/core/rmnet_handlers.h @@ -0,0 +1,35 @@ +/* Copyright (c) 2013, 2016-2017, 2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data ingress/egress handler + * + */ + +#ifndef _RMNET_HANDLERS_H_ +#define _RMNET_HANDLERS_H_ + +#include "rmnet_config.h" + +enum rmnet_packet_context { + RMNET_NET_RX_CTX, + RMNET_WQ_CTX, +}; + +void rmnet_egress_handler(struct sk_buff *skb); +void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port); +void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port, + enum rmnet_packet_context ctx); +void rmnet_set_skb_proto(struct sk_buff *skb); +rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port); +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); + +#endif /* _RMNET_HANDLERS_H_ */ diff --git a/core/rmnet_map.h b/core/rmnet_map.h new file mode 100644 index 0000000000..07164d9523 --- /dev/null +++ b/core/rmnet_map.h @@ -0,0 +1,289 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_MAP_H_ +#define _RMNET_MAP_H_ + +#include +#include "rmnet_config.h" + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u16 ip_family:2; + u16 reserved:14; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_v5_close_type { + RMNET_MAP_COAL_CLOSE_NON_COAL, + RMNET_MAP_COAL_CLOSE_IP_MISS, + RMNET_MAP_COAL_CLOSE_TRANS_MISS, + RMNET_MAP_COAL_CLOSE_HW, + RMNET_MAP_COAL_CLOSE_COAL, +}; + +enum rmnet_map_v5_close_value { + RMNET_MAP_COAL_CLOSE_HW_NL, + RMNET_MAP_COAL_CLOSE_HW_PKT, + RMNET_MAP_COAL_CLOSE_HW_BYTE, + RMNET_MAP_COAL_CLOSE_HW_TIME, + RMNET_MAP_COAL_CLOSE_HW_EVICT, +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; + u16 csum_insert_offset:14; + u16 udp_ip4_ind:1; + u16 csum_enabled:1; +} __aligned(1); + +struct rmnet_map_control_command_header { + u8 command_name; + u8 cmd_type:2; + u8 reserved:5; + u8 e:1; + u16 source_id:15; + u16 ext:1; + u32 transaction_id; +} __aligned(1); + +struct rmnet_map_flow_info_le { + __be32 mux_id; + __be32 flow_id; + __be32 bytes; + __be32 pkts; +} __aligned(1); + +struct rmnet_map_flow_info_be { + u32 mux_id; + u32 flow_id; + u32 bytes; + u32 pkts; +} __aligned(1); + +struct rmnet_map_dl_ind_hdr { + union { + struct { + u32 seq; + u32 bytes; + u32 pkts; + u32 flows; + struct rmnet_map_flow_info_le flow[0]; + } le __aligned(1); + struct { + __be32 seq; + __be32 bytes; + __be32 pkts; + __be32 flows; + struct rmnet_map_flow_info_be flow[0]; + } be __aligned(1); + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind_trl { + union { + __be32 seq_be; + u32 seq_le; + } __aligned(1); +} 
__aligned(1); + +struct rmnet_map_dl_ind { + u8 priority; + union { + void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); + void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + union { + void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); + void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + struct list_head list; +}; + +#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ + (Y)->data)->mux_id) +#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ + (Y)->data)->cd_bit) +#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ + (Y)->data)->pad_len) +#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ + ((Y)->data + \ + sizeof(struct rmnet_map_header))) +#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ + (Y)->data)->pkt_len)) + +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +#define RMNET_MAP_NO_PAD_BYTES 0 +#define RMNET_MAP_ADD_PAD_BYTES 1 + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline struct rmnet_map_control_command * +rmnet_map_get_cmd_start(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return (struct rmnet_map_control_command *)data; +} + +static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port); +struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port); +void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len); +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev, + int csum_type); +int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, + struct sk_buff_head *list, + u16 len); +int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); +void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); +void rmnet_map_tx_aggregate_init(struct rmnet_port *port); +void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); +void rmnet_map_dl_hdr_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_hdr *dl_hdr); +void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port, + struct rmnet_map_dl_ind_hdr *dl_hdr, + struct rmnet_map_control_command_header *qcmd); +void rmnet_map_dl_trl_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_trl *dltrl); +void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port, + struct rmnet_map_dl_ind_trl *dltrl, + struct 
rmnet_map_control_command_header *qcmd); +int rmnet_map_flow_command(struct sk_buff *skb, + struct rmnet_port *port, + bool rmnet_perf); +void rmnet_map_cmd_init(struct rmnet_port *port); +int rmnet_map_dl_ind_register(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind); +int rmnet_map_dl_ind_deregister(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind); +void rmnet_map_cmd_exit(struct rmnet_port *port); +#endif /* _RMNET_MAP_H_ */ diff --git a/core/rmnet_map_command.c b/core/rmnet_map_command.c new file mode 100644 index 0000000000..ce058d2efe --- /dev/null +++ b/core/rmnet_map_command.c @@ -0,0 +1,366 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" + +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +static u8 rmnet_map_do_flow_control(struct sk_buff *skb, + struct rmnet_port *port, + int enable) +{ + struct rmnet_map_header *qmap; + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb); + mux_id = qmap->mux_id; + cmd = rmnet_map_get_cmd_start(skb); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. 
User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) { + kfree_skb(skb); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } else { + return RMNET_MAP_COMMAND_ACK; + } +} + +static void rmnet_map_send_ack(struct sk_buff *skb, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = skb->dev; + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + pskb_trim(skb, + skb->len - sizeof(struct rmnet_map_dl_csum_trailer)); + + skb->protocol = htons(ETH_P_MAP); + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + +void +rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port, + struct rmnet_map_dl_ind_hdr *dlhdr, + struct rmnet_map_control_command_header *qcmd) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_hdr_handler_v2(dlhdr, qcmd); +} + +void +rmnet_map_dl_hdr_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_hdr *dlhdr) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_hdr_handler(dlhdr); +} + +void +rmnet_map_dl_trl_notify_v2(struct rmnet_port *port, + struct rmnet_map_dl_ind_trl *dltrl, + struct rmnet_map_control_command_header *qcmd) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_trl_handler_v2(dltrl, qcmd); +} + +void +rmnet_map_dl_trl_notify(struct rmnet_port *port, + struct rmnet_map_dl_ind_trl *dltrl) +{ + struct rmnet_map_dl_ind *tmp; + + list_for_each_entry(tmp, &port->dl_list, list) + tmp->dl_trl_handler(dltrl); +} + +static void rmnet_map_process_flow_start(struct sk_buff *skb, + struct rmnet_port *port, + bool rmnet_perf) +{ + struct rmnet_map_dl_ind_hdr *dlhdr; + struct rmnet_map_control_command_header *qcmd; + u32 data_format; + bool is_dl_mark_v2; + + if (skb->len < RMNET_DL_IND_HDR_SIZE) + return; + + data_format = port->data_format; + is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2; + if (is_dl_mark_v2) { + pskb_pull(skb, sizeof(struct rmnet_map_header)); + qcmd = (struct rmnet_map_control_command_header *) + rmnet_map_data_ptr(skb); + port->stats.dl_hdr_last_ep_id = qcmd->source_id; + port->stats.dl_hdr_last_qmap_vers = qcmd->reserved; + port->stats.dl_hdr_last_trans_id = qcmd->transaction_id; + pskb_pull(skb, sizeof(struct rmnet_map_control_command_header)); + } else { + pskb_pull(skb, RMNET_MAP_CMD_SIZE); + } + + dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb); + + port->stats.dl_hdr_last_seq = dlhdr->le.seq; + port->stats.dl_hdr_last_bytes = dlhdr->le.bytes; + port->stats.dl_hdr_last_pkts = dlhdr->le.pkts; + port->stats.dl_hdr_last_flows = dlhdr->le.flows; + port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes; + port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts; + port->stats.dl_hdr_count++; + + if (is_dl_mark_v2) + rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd); + else + rmnet_map_dl_hdr_notify(port, dlhdr); + + if (rmnet_perf) { + unsigned int pull_size; + + pull_size = sizeof(struct rmnet_map_dl_ind_hdr); + if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + pull_size += sizeof(struct rmnet_map_dl_csum_trailer); + pskb_pull(skb, pull_size); + } +} + +static void rmnet_map_process_flow_end(struct sk_buff *skb, + struct rmnet_port *port, + bool rmnet_perf) +{ + struct rmnet_map_dl_ind_trl *dltrl; + struct 
rmnet_map_control_command_header *qcmd; + u32 data_format; + bool is_dl_mark_v2; + + if (skb->len < RMNET_DL_IND_TRL_SIZE) + return; + + data_format = port->data_format; + is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2; + if (is_dl_mark_v2) { + pskb_pull(skb, sizeof(struct rmnet_map_header)); + qcmd = (struct rmnet_map_control_command_header *) + rmnet_map_data_ptr(skb); + pskb_pull(skb, sizeof(struct rmnet_map_control_command_header)); + } else { + pskb_pull(skb, RMNET_MAP_CMD_SIZE); + } + + dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb); + + port->stats.dl_trl_last_seq = dltrl->seq_le; + port->stats.dl_trl_count++; + + if (is_dl_mark_v2) + rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd); + else + rmnet_map_dl_trl_notify(port, dltrl); + + if (rmnet_perf) { + unsigned int pull_size; + + pull_size = sizeof(struct rmnet_map_dl_ind_trl); + if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + pull_size += sizeof(struct rmnet_map_dl_csum_trailer); + pskb_pull(skb, pull_size); + } +} + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = rmnet_map_get_cmd_start(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_map_do_flow_control(skb, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_map_do_flow_control(skb, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + kfree_skb(skb); + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_map_send_ack(skb, rc, port); +} + +int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port, + bool rmnet_perf) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + + cmd = rmnet_map_get_cmd_start(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_START: + rmnet_map_process_flow_start(skb, port, rmnet_perf); + break; + + case RMNET_MAP_COMMAND_FLOW_END: + rmnet_map_process_flow_end(skb, port, rmnet_perf); + break; + + default: + return 1; + } + + /* rmnet_perf module will handle the consuming */ + if (!rmnet_perf) + consume_skb(skb); + + return 0; +} +EXPORT_SYMBOL(rmnet_map_flow_command); + +void rmnet_map_cmd_exit(struct rmnet_port *port) +{ + struct rmnet_map_dl_ind *tmp, *idx; + + list_for_each_entry_safe(tmp, idx, &port->dl_list, list) + list_del_rcu(&tmp->list); +} + +void rmnet_map_cmd_init(struct rmnet_port *port) +{ + INIT_LIST_HEAD(&port->dl_list); +} + +int rmnet_map_dl_ind_register(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind) +{ + struct rmnet_map_dl_ind *dl_ind_iterator; + bool empty_ind_list = true; + + if (!port || !dl_ind || !dl_ind->dl_hdr_handler || + !dl_ind->dl_trl_handler) + return -EINVAL; + + list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) { + empty_ind_list = false; + if (dl_ind_iterator->priority < dl_ind->priority) { + if (dl_ind_iterator->list.next) { + if (dl_ind->priority + < list_entry_rcu(dl_ind_iterator->list.next, + typeof(*dl_ind_iterator), list)->priority) { + list_add_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } else { + list_add_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } else { + list_add_tail_rcu(&dl_ind->list, + &dl_ind_iterator->list); + break; + } + } + + if 
(empty_ind_list) + list_add_rcu(&dl_ind->list, &port->dl_list); + + return 0; +} +EXPORT_SYMBOL(rmnet_map_dl_ind_register); + +int rmnet_map_dl_ind_deregister(struct rmnet_port *port, + struct rmnet_map_dl_ind *dl_ind) +{ + struct rmnet_map_dl_ind *tmp; + + if (!port || !dl_ind) + return -EINVAL; + + list_for_each_entry(tmp, &port->dl_list, list) { + if (tmp == dl_ind) { + list_del_rcu(&dl_ind->list); + goto done; + } + } + +done: + return 0; +} +EXPORT_SYMBOL(rmnet_map_dl_ind_deregister); diff --git a/core/rmnet_map_data.c b/core/rmnet_map_data.c new file mode 100644 index 0000000000..505fcc33de --- /dev/null +++ b/core/rmnet_map_data.c @@ -0,0 +1,1245 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data MAP protocol + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_handlers.h" + +#define RMNET_MAP_PKT_COPY_THRESHOLD 64 +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) + +struct rmnet_map_coal_metadata { + void *ip_header; + void *trans_header; + u16 ip_len; + u16 trans_len; + u16 data_offset; + u16 data_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 pkt_count; +}; + +static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, + const void *txporthdr) +{ + __sum16 *check = NULL; + + switch (protocol) { + case IPPROTO_TCP: + check = &(((struct tcphdr *)txporthdr)->check); + break; + + case IPPROTO_UDP: + check = &(((struct udphdr *)txporthdr)->check); + break; + + default: + check = NULL; + break; + } + + return check; +} + +static int +rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) +{ + __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum; + u16 csum_value, csum_value_final; + struct iphdr *ip4h; + void *txporthdr; + __be16 addend; + + ip4h = (struct iphdr *)rmnet_map_data_ptr(skb); + if ((ntohs(ip4h->frag_off) & IP_MF) || + ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) { + priv->stats.csum_fragmented_pkt++; + return -EOPNOTSUPP; + } + + txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4; + + csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr); + + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; + return -EPROTONOSUPPORT; + } + + /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */ + if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) { + priv->stats.csum_skipped++; + return 0; + } + + csum_value = ~ntohs(csum_trailer->csum_value); + hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl); + ip_payload_csum = csum16_sub((__force __sum16)csum_value, + (__force __be16)hdr_csum); + + pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr, + ntohs(ip4h->tot_len) - ip4h->ihl * 4, + ip4h->protocol, 0); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = 
~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip4h->protocol) { + case IPPROTO_UDP: + /* RFC 768 - DL4 1's complement rule for UDP csum 0 */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL4 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; + return 0; + } else { + priv->stats.csum_validation_failed++; + return -EINVAL; + } +} + +#if IS_ENABLED(CONFIG_IPV6) +static int +rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) +{ + __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp; + u16 csum_value, csum_value_final; + __be16 ip6_hdr_csum, addend; + struct ipv6hdr *ip6h; + void *txporthdr, *data = rmnet_map_data_ptr(skb); + u32 length; + + ip6h = data; + + txporthdr = data + sizeof(struct ipv6hdr); + csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr); + + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; + return -EPROTONOSUPPORT; + } + + csum_value = ~ntohs(csum_trailer->csum_value); + ip6_hdr_csum = (__force __be16) + ~ntohs((__force __be16)ip_compute_csum(ip6h, + (int)(txporthdr - data))); + ip6_payload_csum = csum16_sub((__force __sum16)csum_value, + ip6_hdr_csum); + + length = (ip6h->nexthdr == IPPROTO_UDP) ? + ntohs(((struct udphdr *)txporthdr)->len) : + ntohs(ip6h->payload_len); + pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + length, ip6h->nexthdr, 0)); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip6_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = ~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip6h->nexthdr) { + case IPPROTO_UDP: + /* RFC 2460 section 8.1 + * DL6 One's complement rule for UDP checksum 0 + */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL6 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; + return 0; + } else { + priv->stats.csum_validation_failed++; + return -EINVAL; + } +} +#endif + +static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + void *txphdr; + u16 *csum; + + txphdr = iphdr + ip4h->ihl * 4; + + if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv4_ul_csum_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ip4_ind = 1; + else + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + 
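+ /* With CHECKSUM_PARTIAL the transport checksum field still holds the
+ * stack's pseudo-header checksum; hand the rest to the device: clear
+ * ip_summed and complement that field via the helper below.
+ */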
+ skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + void *txphdr; + u16 *csum; + + txphdr = ip6hdr + sizeof(struct ipv6hdr); + + if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv6_ul_csum_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); +} +#endif + +/* Adds MAP header to front of skb->data + * Padding is calculated and set appropriately in MAP header. Mux ID is + * initialized to 0. + */ +struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port) +{ + struct rmnet_map_header *map_header; + u32 padding, map_datalen; + u8 *padbytes; + + map_datalen = skb->len - hdrlen; + map_header = (struct rmnet_map_header *) + skb_push(skb, sizeof(struct rmnet_map_header)); + memset(map_header, 0, sizeof(struct rmnet_map_header)); + + /* Set next_hdr bit for csum offload packets */ + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) + map_header->next_hdr = 1; + + if (pad == RMNET_MAP_NO_PAD_BYTES) { + map_header->pkt_len = htons(map_datalen); + return map_header; + } + + padding = ALIGN(map_datalen, 4) - map_datalen; + + if (padding == 0) + goto done; + + if (skb_tailroom(skb) < padding) + return NULL; + + padbytes = (u8 *)skb_put(skb, padding); + memset(padbytes, 0, padding); + +done: + map_header->pkt_len = htons(map_datalen + padding); + map_header->pad_len = padding & 0x3F; + + return map_header; +} + +/* Deaggregates a single packet + * A whole new buffer is allocated for each portion of an aggregated frame. + * Caller should keep calling deaggregate() on the source skb until 0 is + * returned, indicating that there are no more packets to deaggregate. Caller + * is responsible for freeing the original skb. + */ +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_header *maph; + struct sk_buff *skbn; + unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL; + u32 packet_len; + + if (skb->len == 0) + return NULL; + + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) { + if (!maph->cd_bit) { + packet_len += sizeof(struct rmnet_map_v5_csum_header); + + /* Coalescing headers require MAPv5 */ + next_hdr = data + sizeof(*maph); + } + } + + if (((int)skb->len - (int)packet_len) < 0) + return NULL; + + /* Some hardware can send us empty frames. 
Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + + if (next_hdr && + ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type == + RMNET_MAP_HEADER_TYPE_COALESCING) + return skb; + + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag0 = skb_shinfo(skb)->frags; + struct page *page = skb_frag_page(frag0); + + skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_append_pagefrags(skbn, page, frag0->bv_offset, + packet_len); + skbn->data_len += packet_len; + skbn->len += packet_len; + } else { + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, + GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); + skb_put(skbn, packet_len); + memcpy(skbn->data, data, packet_len); + } + + pskb_pull(skb, packet_len); + + return skbn; +} + +/* Validates packet checksums. Function takes a pointer to + * the beginning of a buffer which contains the IP payload + + * padding + checksum trailer. + * Only IPv4 and IPv6 are supported along with TCP & UDP. + * Fragmented or tunneled packets are not supported. + */ +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) +{ + struct rmnet_priv *priv = netdev_priv(skb->dev); + struct rmnet_map_dl_csum_trailer *csum_trailer; + + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { + priv->stats.csum_sw++; + return -EOPNOTSUPP; + } + + csum_trailer = (struct rmnet_map_dl_csum_trailer *) + (rmnet_map_data_ptr(skb) + len); + + if (!csum_trailer->valid) { + priv->stats.csum_valid_unset++; + return -EINVAL; + } + + if (skb->protocol == htons(ETH_P_IP)) { + return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv); + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv); +#else + priv->stats.csum_err_invalid_ip_version++; + return -EPROTONOSUPPORT; +#endif + } else { + priv->stats.csum_err_invalid_ip_version++; + return -EPROTONOSUPPORT; + } + + return 0; +} +EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet); + +void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; +#else + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; +#endif + } else { + priv->stats.csum_err_invalid_ip_version++; + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ip4_ind = 0; + + priv->stats.csum_sw++; +} + +void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_v5_csum_header *ul_header; + + ul_header = (struct rmnet_map_v5_csum_header *) + skb_push(skb, sizeof(*ul_header)); + memset(ul_header, 0, sizeof(*ul_header)); + 
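+ /* MAPv5 describes the next header with a type field rather than the
+ * MAPv4-style checksum header/trailer: tag this as a checksum-offload
+ * header and request hardware checksumming below only when the stack
+ * asked for it (CHECKSUM_PARTIAL on a supported transport).
+ */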
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + void *iph = (char *)ul_header + sizeof(*ul_header); + void *trans; + __sum16 *check; + u8 proto; + + if (skb->protocol == htons(ETH_P_IP)) { + u16 ip_len = ((struct iphdr *)iph)->ihl * 4; + + proto = ((struct iphdr *)iph)->protocol; + trans = iph + ip_len; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + u16 ip_len = sizeof(struct ipv6hdr); + + proto = ((struct ipv6hdr *)iph)->nexthdr; + trans = iph + ip_len; + } else { + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; + } + + check = rmnet_map_get_csum_field(proto, trans); + if (check) { + *check = 0; + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; + priv->stats.csum_hw++; + return; + } + } + +sw_csum: + priv->stats.csum_sw++; +} + +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. + */ +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev, + int csum_type) +{ + switch (csum_type) { + case RMNET_FLAGS_EGRESS_MAP_CKSUMV4: + rmnet_map_v4_checksum_uplink_packet(skb, orig_dev); + break; + case RMNET_FLAGS_EGRESS_MAP_CKSUMV5: + rmnet_map_v5_checksum_uplink_packet(skb, orig_dev); + break; + default: + break; + } +} + +static void rmnet_map_move_headers(struct sk_buff *skb) +{ + struct iphdr *iph; + u16 ip_len; + u16 trans_len = 0; + u8 proto; + + /* This only applies to non-linear SKBs */ + if (!skb_is_nonlinear(skb)) + return; + + iph = (struct iphdr *)rmnet_map_data_ptr(skb); + if (iph->version == 4) { + ip_len = iph->ihl * 4; + proto = iph->protocol; + if (iph->frag_off & htons(IP_OFFSET)) + /* No transport header information */ + goto pull; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + __be16 frag_off; + u8 nexthdr = ip6h->nexthdr; + + ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, + &frag_off); + if (ip_len < 0) + return; + + proto = nexthdr; + } else { + return; + } + + if (proto == IPPROTO_TCP) { + struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len); + + trans_len = tp->doff * 4; + } else if (proto == IPPROTO_UDP) { + trans_len = sizeof(struct udphdr); + } else if (proto == NEXTHDR_FRAGMENT) { + /* Non-first fragments don't have the fragment length added by + * ipv6_skip_exthdr() and sho up as proto NEXTHDR_FRAGMENT, so + * we account for the length here. + */ + ip_len += sizeof(struct frag_hdr); + } + +pull: + __pskb_pull_tail(skb, ip_len + trans_len); + skb_reset_network_header(skb); + if (trans_len) + skb_set_transport_header(skb, ip_len); +} + +static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb, + struct rmnet_map_coal_metadata *coal_meta, + struct sk_buff *dest) +{ + unsigned char *data_start = rmnet_map_data_ptr(coal_skb) + + coal_meta->ip_len + coal_meta->trans_len; + u32 copy_len = coal_meta->data_len * coal_meta->pkt_count; + + if (skb_is_nonlinear(coal_skb)) { + skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags; + struct page *page = skb_frag_page(frag0); + + skb_append_pagefrags(dest, page, + frag0->bv_offset + coal_meta->ip_len + + coal_meta->trans_len + + coal_meta->data_offset, + copy_len); + dest->data_len += copy_len; + dest->len += copy_len; + } else { + skb_put_data(dest, data_start + coal_meta->data_offset, + copy_len); + } +} + +/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack + * if needed (i.e. 
forwarding, UDP GRO) + */ +static void rmnet_map_gso_stamp(struct sk_buff *skb, + struct rmnet_map_coal_metadata *coal_meta) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct iphdr *iph = ip_hdr(skb); + __sum16 pseudo; + u16 pkt_len = skb->len - coal_meta->ip_len; + bool ipv4 = coal_meta->ip_proto == 4; + + if (ipv4) { + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + pkt_len, coal_meta->trans_proto, + 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + pkt_len, coal_meta->trans_proto, 0); + } + + if (coal_meta->trans_proto == IPPROTO_TCP) { + struct tcphdr *tp = tcp_hdr(skb); + + tp->check = pseudo; + shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + skb->csum_offset = offsetof(struct tcphdr, check); + } else { + struct udphdr *up = udp_hdr(skb); + + up->check = pseudo; + shinfo->gso_type = SKB_GSO_UDP_L4; + skb->csum_offset = offsetof(struct udphdr, check); + } + + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb->data + coal_meta->ip_len - skb->head; + shinfo->gso_size = coal_meta->data_len; + shinfo->gso_segs = coal_meta->pkt_count; +} + +static void +__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb, + struct rmnet_map_coal_metadata *coal_meta, + struct sk_buff_head *list, u8 pkt_id) +{ + struct sk_buff *skbn; + struct rmnet_priv *priv = netdev_priv(coal_skb->dev); + u32 alloc_len; + + /* We can avoid copying the data if the SKB we got from the lower-level + * drivers was nonlinear. + */ + if (skb_is_nonlinear(coal_skb)) + alloc_len = coal_meta->ip_len + coal_meta->trans_len; + else + alloc_len = coal_meta->ip_len + coal_meta->trans_len + + coal_meta->data_len; + + skbn = alloc_skb(alloc_len, GFP_ATOMIC); + if (!skbn) + return; + + skb_reserve(skbn, coal_meta->ip_len + coal_meta->trans_len); + rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn); + + /* Push transport header and update necessary fields */ + skb_push(skbn, coal_meta->trans_len); + memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len); + skb_reset_transport_header(skbn); + if (coal_meta->trans_proto == IPPROTO_TCP) { + struct tcphdr *th = tcp_hdr(skbn); + + th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset); + } else if (coal_meta->trans_proto == IPPROTO_UDP) { + udp_hdr(skbn)->len = htons(skbn->len); + } + + /* Push IP header and update necessary fields */ + skb_push(skbn, coal_meta->ip_len); + memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len); + skb_reset_network_header(skbn); + if (coal_meta->ip_proto == 4) { + struct iphdr *iph = ip_hdr(skbn); + + iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id); + iph->tot_len = htons(skbn->len); + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); + } else { + /* Payload length includes any extension headers */ + ipv6_hdr(skbn)->payload_len = htons(skbn->len - + sizeof(struct ipv6hdr)); + } + + skbn->ip_summed = CHECKSUM_UNNECESSARY; + skbn->dev = coal_skb->dev; + priv->stats.coal.coal_reconstruct++; + + /* Stamp GSO information if necessary */ + if (coal_meta->pkt_count > 1) + rmnet_map_gso_stamp(skbn, coal_meta); + + __skb_queue_tail(list, skbn); + + /* Update meta information to move past the data we just segmented */ + coal_meta->data_offset += coal_meta->data_len * coal_meta->pkt_count; + coal_meta->pkt_id = pkt_id + 1; + coal_meta->pkt_count = 0; +} + +/* Converts the coalesced SKB into a list of SKBs. + * NLOs containing csum erros will not be included. 
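+ * An NLO here is one nl_pairs[] entry in the coalescing header: a
+ * packet length plus the count of packets received at that length.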
+ * The original coalesced SKB should be treated as invalid and + * must be freed by the caller + */ +static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb, + u64 nlo_err_mask, + struct sk_buff_head *list) +{ + struct iphdr *iph; + struct rmnet_priv *priv = netdev_priv(coal_skb->dev); + struct rmnet_map_v5_coal_header *coal_hdr; + struct rmnet_map_coal_metadata coal_meta; + u16 pkt_len; + u8 pkt, total_pkt = 0; + u8 nlo; + bool gro = coal_skb->dev->features & NETIF_F_GRO_HW; + + memset(&coal_meta, 0, sizeof(coal_meta)); + + /* Pull off the headers we no longer need */ + pskb_pull(coal_skb, sizeof(struct rmnet_map_header)); + coal_hdr = (struct rmnet_map_v5_coal_header *) + rmnet_map_data_ptr(coal_skb); + pskb_pull(coal_skb, sizeof(*coal_hdr)); + + iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb); + + if (iph->version == 4) { + coal_meta.ip_proto = 4; + coal_meta.ip_len = iph->ihl * 4; + coal_meta.trans_proto = iph->protocol; + coal_meta.ip_header = iph; + + /* Don't allow coalescing of any packets with IP options */ + if (iph->ihl != 5) + gro = false; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + __be16 frag_off; + u8 protocol = ip6h->nexthdr; + + coal_meta.ip_proto = 6; + coal_meta.ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h), + &protocol, &frag_off); + coal_meta.trans_proto = protocol; + coal_meta.ip_header = ip6h; + + /* If we run into a problem, or this has a fragment header + * (which should technically not be possible, if the HW + * works as intended...), bail. + */ + if (coal_meta.ip_len < 0 || frag_off) { + priv->stats.coal.coal_ip_invalid++; + return; + } else if (coal_meta.ip_len > sizeof(*ip6h)) { + /* Don't allow coalescing of any packets with IPv6 + * extension headers. + */ + gro = false; + } + } else { + priv->stats.coal.coal_ip_invalid++; + return; + } + + if (coal_meta.trans_proto == IPPROTO_TCP) { + struct tcphdr *th; + + th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len); + coal_meta.trans_len = th->doff * 4; + coal_meta.trans_header = th; + } else if (coal_meta.trans_proto == IPPROTO_UDP) { + struct udphdr *uh; + + uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len); + coal_meta.trans_len = sizeof(*uh); + coal_meta.trans_header = uh; + } else { + priv->stats.coal.coal_trans_invalid++; + return; + } + + for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) { + pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len); + pkt_len -= coal_meta.ip_len + coal_meta.trans_len; + coal_meta.data_len = pkt_len; + for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets; + pkt++, total_pkt++) { + nlo_err_mask <<= 1; + if (nlo_err_mask & (1ULL << 63)) { + priv->stats.coal.coal_csum_err++; + + /* Segment out the good data */ + if (gro && coal_meta.pkt_count) { + __rmnet_map_segment_coal_skb(coal_skb, + &coal_meta, + list, + total_pkt); + } + + /* skip over bad packet */ + coal_meta.data_offset += pkt_len; + coal_meta.pkt_id = total_pkt + 1; + } else { + coal_meta.pkt_count++; + + /* Segment the packet if we aren't sending the + * larger packet up the stack. + */ + if (!gro) + __rmnet_map_segment_coal_skb(coal_skb, + &coal_meta, + list, + total_pkt); + } + } + + /* If we're switching NLOs, we need to send out everything from + * the previous one, if we haven't done so. NLOs only switch + * when the packet length changes. + */ + if (gro && coal_meta.pkt_count) { + /* Fast forward the (hopefully) common case. + * Frames with only one NLO (i.e. one packet length) and + * no checksum errors don't need to be segmented here. 
+ * We can just pass off the original skb. + */ + if (pkt_len * coal_meta.pkt_count == + coal_skb->len - coal_meta.ip_len - + coal_meta.trans_len) { + rmnet_map_move_headers(coal_skb); + coal_skb->ip_summed = CHECKSUM_UNNECESSARY; + if (coal_meta.pkt_count > 1) + rmnet_map_gso_stamp(coal_skb, + &coal_meta); + __skb_queue_tail(list, coal_skb); + return; + } + + __rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list, + total_pkt); + } + } +} + +/* Record reason for coalescing pipe closure */ +static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type, + u8 code) +{ + struct rmnet_coal_close_stats *stats = &priv->stats.coal.close; + + switch (type) { + case RMNET_MAP_COAL_CLOSE_NON_COAL: + stats->non_coal++; + break; + case RMNET_MAP_COAL_CLOSE_IP_MISS: + stats->ip_miss++; + break; + case RMNET_MAP_COAL_CLOSE_TRANS_MISS: + stats->trans_miss++; + break; + case RMNET_MAP_COAL_CLOSE_HW: + switch (code) { + case RMNET_MAP_COAL_CLOSE_HW_NL: + stats->hw_nl++; + break; + case RMNET_MAP_COAL_CLOSE_HW_PKT: + stats->hw_pkt++; + break; + case RMNET_MAP_COAL_CLOSE_HW_BYTE: + stats->hw_byte++; + break; + case RMNET_MAP_COAL_CLOSE_HW_TIME: + stats->hw_time++; + break; + case RMNET_MAP_COAL_CLOSE_HW_EVICT: + stats->hw_evict++; + break; + default: + break; + } + break; + case RMNET_MAP_COAL_CLOSE_COAL: + stats->coal++; + break; + default: + break; + } +} + +/* Check if the coalesced header has any incorrect values, in which case, the + * entire coalesced skb must be dropped. Then check if there are any + * checksum issues + */ +static int rmnet_map_data_check_coal_header(struct sk_buff *skb, + u64 *nlo_err_mask) +{ + struct rmnet_map_v5_coal_header *coal_hdr; + unsigned char *data = rmnet_map_data_ptr(skb); + struct rmnet_priv *priv = netdev_priv(skb->dev); + u64 mask = 0; + int i; + u8 veid, pkts = 0; + + coal_hdr = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(struct rmnet_map_header))); + veid = coal_hdr->virtual_channel_id; + + if (coal_hdr->num_nlos == 0 || + coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) { + priv->stats.coal.coal_hdr_nlo_err++; + return -EINVAL; + } + + for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) { + /* If there is a checksum issue, we need to split + * up the skb. 
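+ * (Each NLO supplies an 8-bit csum_error_bitmap; the bitmaps are
+ * packed MSB-first into a 64-bit mask so the segmentation loop can
+ * shift through it one packet at a time.)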
Rebuild the full csum error field + */ + u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap; + u8 pkt = coal_hdr->nl_pairs[i].num_packets; + + mask |= ((u64)err) << (7 - i) * 8; + + /* Track total packets in frame */ + pkts += pkt; + if (pkts > RMNET_MAP_V5_MAX_PACKETS) { + priv->stats.coal.coal_hdr_pkt_err++; + return -EINVAL; + } + } + + /* Track number of packets we get inside of coalesced frames */ + priv->stats.coal.coal_pkts += pkts; + + /* Update ethtool stats */ + rmnet_map_data_log_close_stats(priv, + coal_hdr->close_type, + coal_hdr->close_value); + if (veid < RMNET_MAX_VEID) + priv->stats.coal.coal_veid[veid]++; + + *nlo_err_mask = mask; + + return 0; +} + +/* Process a QMAPv5 packet header */ +int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, + struct sk_buff_head *list, + u16 len) +{ + struct rmnet_priv *priv = netdev_priv(skb->dev); + u64 nlo_err_mask; + int rc = 0; + + switch (rmnet_map_get_next_hdr_type(skb)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + priv->stats.coal.coal_rx++; + rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask); + if (rc) + return rc; + + rmnet_map_segment_coal_skb(skb, nlo_err_mask, list); + if (skb_peek(list) != skb) + consume_skb(skb); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_map_get_csum_valid(skb)) { + priv->stats.csum_ok++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + priv->stats.csum_valid_unset++; + } + + /* Pull unnecessary headers and move the rest to the linear + * section of the skb. + */ + pskb_pull(skb, + (sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))); + rmnet_map_move_headers(skb); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. + */ + pskb_trim(skb, len); + __skb_queue_tail(list, skb); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +long rmnet_agg_time_limit __read_mostly = 1000000L; +long rmnet_agg_bypass_time __read_mostly = 10000000L; + +int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset) +{ + u8 *packet_start = skb->data + offset; + int is_icmp = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = (struct iphdr *)(packet_start); + + if (ip4h->protocol == IPPROTO_ICMP) + is_icmp = 1; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start); + + if (ip6h->nexthdr == IPPROTO_ICMPV6) { + is_icmp = 1; + } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag; + + frag = (struct frag_hdr *)(packet_start + + sizeof(struct ipv6hdr)); + if (frag->nexthdr == IPPROTO_ICMPV6) + is_icmp = 1; + } + } + + return is_icmp; +} + +static void rmnet_map_flush_tx_packet_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct rmnet_port *port; + unsigned long flags; + + port = container_of(work, struct rmnet_port, agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (likely(port->agg_state == -EINPROGRESS)) { + /* Buffer may have already been shipped out */ + if (likely(port->agg_skb)) { + skb = port->agg_skb; + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); + if (skb) + dev_queue_xmit(skb); +} + +enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t) +{ + struct rmnet_port *port; + + port = container_of(t, struct rmnet_port, hrtimer); + + schedule_work(&port->agg_wq); + return HRTIMER_NORESTART; +} + +void rmnet_map_tx_aggregate(struct 
sk_buff *skb, struct rmnet_port *port) +{ + struct timespec64 diff, last; + int size, agg_count = 0; + struct sk_buff *agg_skb; + unsigned long flags; + u8 *dest_buff; + +new_packet: + spin_lock_irqsave(&port->agg_lock, flags); + memcpy(&last, &port->agg_last, sizeof(struct timespec)); + ktime_get_real_ts64(&port->agg_last); + + if (!port->agg_skb) { + /* Check to see if we should agg first. If the traffic is very + * sparse, don't aggregate. We will need to tune this later + */ + diff = timespec64_sub(port->agg_last, last); + size = port->egress_agg_params.agg_size - skb->len; + + if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time || + size <= 0) { + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + + port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC); + if (!port->agg_skb) { + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + port->agg_skb->protocol = htons(ETH_P_MAP); + port->agg_count = 1; + ktime_get_real_ts64(&port->agg_time); + dev_kfree_skb_any(skb); + goto schedule; + } + diff = timespec64_sub(port->agg_last, port->agg_time); + size = port->egress_agg_params.agg_size - port->agg_skb->len; + + if (skb->len > size || + port->agg_count >= port->egress_agg_params.agg_count || + diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) { + agg_skb = port->agg_skb; + agg_count = port->agg_count; + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + port->agg_state = 0; + spin_unlock_irqrestore(&port->agg_lock, flags); + hrtimer_cancel(&port->hrtimer); + dev_queue_xmit(agg_skb); + goto new_packet; + } + + dest_buff = skb_put(port->agg_skb, skb->len); + memcpy(dest_buff, skb->data, skb->len); + port->agg_count++; + dev_kfree_skb_any(skb); + +schedule: + if (port->agg_state != -EINPROGRESS) { + port->agg_state = -EINPROGRESS; + hrtimer_start(&port->hrtimer, + ns_to_ktime(port->egress_agg_params.agg_time), + HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&port->agg_lock, flags); +} + +void rmnet_map_tx_aggregate_init(struct rmnet_port *port) +{ + hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->hrtimer.function = rmnet_map_flush_tx_packet_queue; + port->egress_agg_params.agg_size = 8192; + port->egress_agg_params.agg_count = 20; + port->egress_agg_params.agg_time = 3000000; + spin_lock_init(&port->agg_lock); + + INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work); +} + +void rmnet_map_tx_aggregate_exit(struct rmnet_port *port) +{ + unsigned long flags; + + hrtimer_cancel(&port->hrtimer); + cancel_work_sync(&port->agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (port->agg_state == -EINPROGRESS) { + if (port->agg_skb) { + kfree_skb(port->agg_skb); + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); +} + +void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb) +{ + struct rmnet_port *port; + struct sk_buff *agg_skb; + unsigned long flags; + + port = rmnet_get_port(qmap_skb->dev); + + if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { + spin_lock_irqsave(&port->agg_lock, flags); + if (port->agg_skb) { + agg_skb = port->agg_skb; + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct 
timespec)); + port->agg_state = 0; + spin_unlock_irqrestore(&port->agg_lock, flags); + hrtimer_cancel(&port->hrtimer); + dev_queue_xmit(agg_skb); + } else { + spin_unlock_irqrestore(&port->agg_lock, flags); + } + } + + dev_queue_xmit(qmap_skb); +} +EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd); diff --git a/core/rmnet_private.h b/core/rmnet_private.h new file mode 100644 index 0000000000..420c656836 --- /dev/null +++ b/core/rmnet_private.h @@ -0,0 +1,42 @@ +/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_PRIVATE_H_ +#define _RMNET_PRIVATE_H_ + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +/* Constants */ +#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31) +#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30) +#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29) + +#define RMNET_FLAGS_INGRESS_COALESCE BIT(4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 BIT(5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 BIT(6) + +#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\ +RMNET_INGRESS_FORMAT_DL_MARKER_V2) + +/* Power save feature*/ +#define RMNET_INGRESS_FORMAT_PS BIT(27) +#define RMNET_FORMAT_PS_NOTIF BIT(26) + +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +#endif /* _RMNET_PRIVATE_H_ */ diff --git a/core/rmnet_qmi.h b/core/rmnet_qmi.h new file mode 100644 index 0000000000..06844879ec --- /dev/null +++ b/core/rmnet_qmi.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _RMNET_QMI_H +#define _RMNET_QMI_H + +#include +#include + +void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb); + +void *rmnet_get_qmi_pt(void *port); +void *rmnet_get_qos_pt(struct net_device *dev); +void *rmnet_get_rmnet_port(struct net_device *dev); +struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id); +void rmnet_reset_qmi_pt(void *port); +void rmnet_init_qmi_pt(void *port, void *qmi); +void rmnet_enable_all_flows(void *port); +bool rmnet_all_flows_enabled(void *port); +void rmnet_set_powersave_format(void *port); +void rmnet_clear_powersave_format(void *port); +void rmnet_get_packets(void *port, u64 *rx, u64 *tx); +int rmnet_get_powersave_notif(void *port); +#endif /*_RMNET_QMI_H*/ diff --git a/core/rmnet_trace.h b/core/rmnet_trace.h new file mode 100644 index 0000000000..b1ae59b30c --- /dev/null +++ b/core/rmnet_trace.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rmnet +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core +#define TRACE_INCLUDE_FILE rmnet_trace + +#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RMNET_H + +#include +#include + +/*****************************************************************************/ +/* Trace events for rmnet module */ +/*****************************************************************************/ +TRACE_EVENT(rmnet_xmit_skb, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __string(dev_name, skb->dev->name) + __field(unsigned int, len) + ), + + TP_fast_assign( + __assign_str(dev_name, skb->dev->name); + __entry->len = skb->len; + ), + + TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len) +); + +DECLARE_EVENT_CLASS + (rmnet_mod_template, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry(__field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign(__entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + +TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +DEFINE_EVENT + (rmnet_mod_template, rmnet_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +/*****************************************************************************/ +/* Trace events for rmnet_perf module */ 
+/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +/*****************************************************************************/ +/* Trace events for rmnet_shs module */ +/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) +); + +DECLARE_EVENT_CLASS + (rmnet_freq_template, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq), + + TP_STRUCT__entry(__field(u8, core) + __field(u32, newfreq) + ), + + TP_fast_assign(__entry->core = core; + __entry->newfreq = newfreq; + ), + +TP_printk("freq policy core:%u freq floor :%u", + __entry->core, __entry->newfreq) + +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_boost, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_reset, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +TRACE_EVENT + (rmnet_freq_update, + + TP_PROTO(u8 core, u32 lowfreq, u32 highfreq), + + TP_ARGS(core, lowfreq, highfreq), + + TP_STRUCT__entry(__field(u8, core) + __field(u32, lowfreq) + __field(u32, highfreq) + ), + + TP_fast_assign(__entry->core = core; + __entry->lowfreq = lowfreq; + __entry->highfreq = highfreq; + + ), + +TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u", + __entry->core, __entry->lowfreq, __entry->highfreq) +); +#endif /* _TRACE_RMNET_H */ + +#include diff --git a/core/rmnet_vnd.c b/core/rmnet_vnd.c new file mode 100644 index 0000000000..c340f327d5 --- /dev/null 
+++ b/core/rmnet_vnd.c @@ -0,0 +1,399 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * RMNET Data virtual network driver + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_vnd.h" + +#include "qmi_rmnet.h" +#include "rmnet_qmi.h" +#include "rmnet_trace.h" + +/* RX/TX Fixup */ + +void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.rx_pkts++; + pcpu_ptr->stats.rx_bytes += skb_len; + u64_stats_update_end(&pcpu_ptr->syncp); +} + +void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.tx_pkts++; + pcpu_ptr->stats.tx_bytes += skb_len; + u64_stats_update_end(&pcpu_ptr->syncp); +} + +/* Network Device Operations */ + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + int ip_type; + u32 mark; + unsigned int len; + + priv = netdev_priv(dev); + if (priv->real_dev) { + ip_type = (ip_hdr(skb)->version == 4) ? 
+ AF_INET : AF_INET6; + mark = skb->mark; + len = skb->len; + trace_rmnet_xmit_skb(skb); + rmnet_egress_handler(skb); + qmi_rmnet_burst_fc_check(dev, ip_type, mark, len); + qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev)); + } else { + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + return -EINVAL; + + rmnet_dev->mtu = new_mtu; + return 0; +} + +static int rmnet_vnd_get_iflink(const struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + return priv->real_dev->ifindex; +} + +static int rmnet_vnd_init(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + int err; + + priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats); + if (!priv->pcpu_stats) + return -ENOMEM; + + err = gro_cells_init(&priv->gro_cells, dev); + if (err) { + free_percpu(priv->pcpu_stats); + return err; + } + + return 0; +} + +static void rmnet_vnd_uninit(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + void *qos; + + gro_cells_destroy(&priv->gro_cells); + free_percpu(priv->pcpu_stats); + + qos = priv->qos_info; + RCU_INIT_POINTER(priv->qos_info, NULL); + synchronize_rcu(); + qmi_rmnet_qos_exit(dev, qos); +} + +static void rmnet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_vnd_stats total_stats; + struct rmnet_pcpu_stats *pcpu_ptr; + unsigned int cpu, start; + + memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); + + for_each_possible_cpu(cpu) { + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); + + do { + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); + total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts; + total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes; + total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts; + total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes; + } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); + + total_stats.tx_drops += pcpu_ptr->stats.tx_drops; + } + + s->rx_packets = total_stats.rx_pkts; + s->rx_bytes = total_stats.rx_bytes; + s->tx_packets = total_stats.tx_pkts; + s->tx_bytes = total_stats.tx_bytes; + s->tx_dropped = total_stats.tx_drops; +} + +static u16 rmnet_vnd_select_queue(struct net_device *dev, + struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + int txq = 0; + + if (priv->real_dev) + txq = qmi_rmnet_get_queue(dev, skb); + + return (txq < dev->real_num_tx_queues) ? 
txq : 0; +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_get_iflink = rmnet_vnd_get_iflink, + .ndo_add_slave = rmnet_add_bridge, + .ndo_del_slave = rmnet_del_bridge, + .ndo_init = rmnet_vnd_init, + .ndo_uninit = rmnet_vnd_uninit, + .ndo_get_stats64 = rmnet_get_stats64, + .ndo_select_queue = rmnet_vnd_select_queue, +}; + +static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { + "Checksum ok", + "Checksum valid bit not set", + "Checksum validation failed", + "Checksum error bad buffer", + "Checksum error bad ip version", + "Checksum error bad transport", + "Checksum skipped on ip fragment", + "Checksum skipped", + "Checksum computed in software", + "Checksum computed in hardware", + "Coalescing packets received", + "Coalesced packets", + "Coalescing header NLO errors", + "Coalescing header pcount errors", + "Coalescing checksum errors", + "Coalescing packet reconstructs", + "Coalescing IP version invalid", + "Coalescing L4 header invalid", + "Coalescing close Non-coalescable", + "Coalescing close L3 mismatch", + "Coalescing close L4 mismatch", + "Coalescing close HW NLO limit", + "Coalescing close HW packet limit", + "Coalescing close HW byte limit", + "Coalescing close HW time limit", + "Coalescing close HW eviction", + "Coalescing close Coalescable", + "Coalescing packets over VEID0", + "Coalescing packets over VEID1", + "Coalescing packets over VEID2", + "Coalescing packets over VEID3", +}; + +static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = { + "MAP Cmd last version", + "MAP Cmd last ep id", + "MAP Cmd last transaction id", + "DL header last seen sequence", + "DL header last seen bytes", + "DL header last seen packets", + "DL header last seen flows", + "DL header pkts received", + "DL header total bytes received", + "DL header total pkts received", + "DL trailer last seen sequence", + "DL trailer pkts received", +}; + +static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &rmnet_gstrings_stats, + sizeof(rmnet_gstrings_stats)); + memcpy(buf + sizeof(rmnet_gstrings_stats), + &rmnet_port_gstrings_stats, + sizeof(rmnet_port_gstrings_stats)); + break; + } +} + +static int rmnet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rmnet_gstrings_stats) + + ARRAY_SIZE(rmnet_port_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void rmnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_priv_stats *st = &priv->stats; + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + + if (!data || !port) + return; + + stp = &port->stats; + + memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); + memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp, + ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64)); +} + +static int rmnet_stats_reset(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + if (!port) + return -EINVAL; + + stp = &port->stats; + + memset(stp, 0, sizeof(*stp)); + return 0; +} + +static const struct ethtool_ops rmnet_ethtool_ops = { + .get_ethtool_stats = rmnet_get_ethtool_stats, + .get_strings = rmnet_get_strings, + 
.get_sset_count = rmnet_get_sset_count, + .nway_reset = rmnet_stats_reset, +}; + +/* Called by kernel whenever a new rmnet device is created. Sets MTU, + * flags, ARP type, needed headroom, etc... + */ +void rmnet_vnd_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->netdev_ops = &rmnet_vnd_ops; + rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; + rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; + random_ether_addr(rmnet_dev->dev_addr); + rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + rmnet_dev->needs_free_netdev = true; + rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; +} + +/* Exposed API */ + +int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev, + struct rmnet_endpoint *ep) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + int rc; + + if (ep->egress_dev) + return -EINVAL; + + if (rmnet_get_endpoint(port, id)) + return -EBUSY; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + rmnet_dev->hw_features |= NETIF_F_SG; + rmnet_dev->hw_features |= NETIF_F_GRO_HW; + + priv->real_dev = real_dev; + + rc = register_netdevice(rmnet_dev); + if (!rc) { + ep->egress_dev = rmnet_dev; + ep->mux_id = id; + port->nr_rmnet_devs++; + + rmnet_dev->rtnl_link_ops = &rmnet_link_ops; + + priv->mux_id = id; + priv->qos_info = qmi_rmnet_qos_init(real_dev, id); + + netdev_dbg(rmnet_dev, "rmnet dev created\n"); + } + + return rc; +} + +int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep) +{ + if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev) + return -EINVAL; + + ep->egress_dev = NULL; + port->nr_rmnet_devs--; + return 0; +} + +u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) +{ + struct rmnet_priv *priv; + + priv = netdev_priv(rmnet_dev); + return priv->mux_id; +} + +int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) +{ + netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); + /* Although we expect similar number of enable/disable + * commands, optimize for the disable. That is more + * latency sensitive than enable + */ + if (unlikely(enable)) + netif_wake_queue(rmnet_dev); + else + netif_stop_queue(rmnet_dev); + + return 0; +} diff --git a/core/rmnet_vnd.h b/core/rmnet_vnd.h new file mode 100644 index 0000000000..8b26f7cb63 --- /dev/null +++ b/core/rmnet_vnd.h @@ -0,0 +1,30 @@ +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * RMNET Data Virtual Network Device APIs
+ *
+ */
+
+#ifndef _RMNET_VND_H_
+#define _RMNET_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+ struct rmnet_port *port,
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+ struct rmnet_endpoint *ep);
+void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
+void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
+u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
+void rmnet_vnd_setup(struct net_device *dev);
+#endif /* _RMNET_VND_H_ */
diff --git a/core/wda.h b/core/wda.h
new file mode 100644
index 0000000000..b6038ad819
--- /dev/null
+++ b/core/wda.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wda
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
+#define TRACE_INCLUDE_FILE wda
+
+#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WDA_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(wda_set_powersave_mode,
+
+ TP_PROTO(int enable),
+
+ TP_ARGS(enable),
+
+ TP_STRUCT__entry(
+ __field(int, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->enable = enable;
+ ),
+
+ TP_printk("set powersave mode to %s",
+ __entry->enable ? "enable" : "disable")
+);
+
+TRACE_EVENT(wda_client_state_up,
+
+ TP_PROTO(u32 instance, u32 ep_type, u32 iface),
+
+ TP_ARGS(instance, ep_type, iface),
+
+ TP_STRUCT__entry(
+ __field(u32, instance)
+ __field(u32, ep_type)
+ __field(u32, iface)
+ ),
+
+ TP_fast_assign(
+ __entry->instance = instance;
+ __entry->ep_type = ep_type;
+ __entry->iface = iface;
+ ),
+
+ TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
+ __entry->instance, __entry->ep_type, __entry->iface)
+);
+
+TRACE_EVENT(wda_client_state_down,
+
+ TP_PROTO(int from_cb),
+
+ TP_ARGS(from_cb),
+
+ TP_STRUCT__entry(
+ __field(int, from_cb)
+ ),
+
+ TP_fast_assign(
+ __entry->from_cb = from_cb;
+ ),
+
+ TP_printk("Client: Connection with WDA lost Exit by callback %d",
+ __entry->from_cb)
+);
+
+#endif /* _TRACE_WDA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/core/wda_qmi.c b/core/wda_qmi.c
new file mode 100644
index 0000000000..7e6bc947bb
--- /dev/null
+++ b/core/wda_qmi.c
@@ -0,0 +1,458 @@
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/soc/qcom/qmi.h>
+#include "rmnet_qmi.h"
+#define CREATE_TRACE_POINTS
+#include "wda.h"
+#undef CREATE_TRACE_POINTS
+#include "qmi_rmnet_i.h"
+
+struct wda_qmi_data {
+ void *rmnet_port;
+ struct workqueue_struct *wda_wq;
+ struct work_struct svc_arrive;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr ssctl;
+ struct svc_info svc;
+ int restart_state;
+};
+
+static void wda_svc_config(struct work_struct *work);
+/* **************************************************** */
+#define WDA_SERVICE_ID_V01 0x1A
+#define WDA_SERVICE_VERS_V01 0x01
+#define WDA_TIMEOUT_JF msecs_to_jiffies(1000)
+
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14
+
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 4
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7
+
+enum wda_powersave_config_mask_enum_v01 {
+ WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
+ WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
+ WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
+ WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
+ WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct wda_set_powersave_config_req_msg_v01 {
+ /* Mandatory */
+ struct data_ep_id_type_v01 ep_id;
+ /* Optional */
+ uint8_t req_data_cfg_valid;
+ enum wda_powersave_config_mask_enum_v01 req_data_cfg;
+};
+
+struct wda_set_powersave_config_resp_msg_v01 {
+ /* Mandatory */
+ struct qmi_response_type_v01 resp;
+ /* Optional */
+ uint8_t data_cfg_valid;
+ enum wda_powersave_config_mask_enum_v01 data_cfg;
+};
+
+struct wda_set_powersave_mode_req_msg_v01 {
+ /* Mandatory */
+ uint8_t powersave_control_flag;
+};
+
+struct wda_set_powersave_mode_resp_msg_v01 {
+ /* Mandatory */
+ struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct data_ep_id_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wda_set_powersave_config_req_msg_v01,
+ ep_id),
+ .ei_array = data_ep_id_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wda_set_powersave_config_req_msg_v01,
+ req_data_cfg_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum
+ wda_powersave_config_mask_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wda_set_powersave_config_req_msg_v01,
+ req_data_cfg),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wda_set_powersave_config_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ 
wda_set_powersave_config_resp_msg_v01, + data_cfg_valid), + .ei_array = NULL, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum + wda_powersave_config_mask_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wda_set_powersave_config_resp_msg_v01, + data_cfg), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wda_set_powersave_mode_req_msg_v01, + powersave_control_flag), + .ei_array = NULL, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wda_set_powersave_mode_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable) +{ + struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data; + struct wda_set_powersave_mode_resp_msg_v01 *resp; + struct wda_set_powersave_mode_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + if (!data || !data->rmnet_port) + return -EINVAL; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + ret = qmi_txn_init(&data->handle, &txn, + wda_set_powersave_mode_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init for response, err: %d\n", + __func__, ret); + goto out; + } + + req->powersave_control_flag = enable; + ret = qmi_send_request(&data->handle, &data->ssctl, &txn, + QMI_WDA_SET_POWERSAVE_MODE_REQ_V01, + QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN, + wda_set_powersave_mode_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("%s() Failed sending request, err: %d\n", + __func__, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF); + if (ret < 0) { + pr_err("%s() Response waiting failed, err: %d\n", + __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() Request rejected, result: %d, err: %d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static int wda_set_powersave_config_req(struct qmi_handle *wda_handle) +{ + struct wda_qmi_data *data = container_of(wda_handle, + struct wda_qmi_data, handle); + struct wda_set_powersave_config_resp_msg_v01 *resp; + struct wda_set_powersave_config_req_msg_v01 *req; + struct qmi_txn txn; + int ret; + + req = kzalloc(sizeof(*req), GFP_ATOMIC); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_ATOMIC); + if (!resp) { + kfree(req); + return -ENOMEM; + } + + ret = qmi_txn_init(wda_handle, &txn, + wda_set_powersave_config_resp_msg_v01_ei, resp); + if (ret < 0) { + pr_err("%s() Failed init for response, err: %d\n", + __func__, ret); + goto out; + } + + req->ep_id.ep_type = data->svc.ep_type; + req->ep_id.iface_id = data->svc.iface_id; + 
req->req_data_cfg_valid = 1; + req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01; + ret = qmi_send_request(wda_handle, &data->ssctl, &txn, + QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01, + QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN, + wda_set_powersave_config_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + pr_err("%s() Failed sending request, err: %d\n", __func__, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF); + if (ret < 0) { + pr_err("%s() Response waiting failed, err: %d\n", + __func__, ret); + } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + pr_err("%s() Request rejected, result: %d, error: %d\n", + __func__, resp->resp.result, resp->resp.error); + ret = -resp->resp.result; + } + +out: + kfree(resp); + kfree(req); + return ret; +} + +static void wda_svc_config(struct work_struct *work) +{ + struct wda_qmi_data *data = container_of(work, struct wda_qmi_data, + svc_arrive); + struct qmi_info *qmi; + int rc; + + if (data->restart_state == 1) + return; + rc = wda_set_powersave_config_req(&data->handle); + if (rc < 0) { + pr_err("%s Failed to init service, err[%d]\n", __func__, rc); + return; + } + + if (data->restart_state == 1) + return; + while (!rtnl_trylock()) { + if (!data->restart_state) + cond_resched(); + else + return; + } + qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port); + if (!qmi) { + rtnl_unlock(); + return; + } + + qmi->wda_pending = NULL; + qmi->wda_client = (void *)data; + trace_wda_client_state_up(data->svc.instance, + data->svc.ep_type, + data->svc.iface_id); + + rtnl_unlock(); + + pr_info("Connection established with the WDA Service\n"); +} + +static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data, + handle); + + data->ssctl.sq_family = AF_QIPCRTR; + data->ssctl.sq_node = svc->node; + data->ssctl.sq_port = svc->port; + + queue_work(data->wda_wq, &data->svc_arrive); + + return 0; +} + +static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc) +{ + struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data, + handle); + + if (!data) + pr_info("%s() data is null\n", __func__); +} + +static struct qmi_ops server_ops = { + .new_server = wda_svc_arrive, + .del_server = wda_svc_exit, +}; + +int +wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi) +{ + struct wda_qmi_data *data; + int rc = -ENOMEM; + + if (!port || !qmi) + return -EINVAL; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->wda_wq = create_singlethread_workqueue("wda_wq"); + if (!data->wda_wq) { + pr_err("%s Could not create workqueue\n", __func__); + goto err0; + } + + data->rmnet_port = port; + data->restart_state = 0; + memcpy(&data->svc, psvc, sizeof(data->svc)); + INIT_WORK(&data->svc_arrive, wda_svc_config); + + rc = qmi_handle_init(&data->handle, + QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN, + &server_ops, NULL); + if (rc < 0) { + pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc); + goto err1; + } + + rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01, + WDA_SERVICE_VERS_V01, psvc->instance); + if (rc < 0) { + pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc); + goto err2; + } + + qmi->wda_pending = (void *)data; + return 0; + +err2: + qmi_handle_release(&data->handle); +err1: + destroy_workqueue(data->wda_wq); +err0: + kfree(data); + return rc; +} + +void wda_qmi_client_exit(void *wda_data) +{ + struct wda_qmi_data *data = (struct 
wda_qmi_data *)wda_data; + + if (!data) { + pr_info("%s() data is null\n", __func__); + return; + } + + data->restart_state = 1; + trace_wda_client_state_down(0); + qmi_handle_release(&data->handle); + destroy_workqueue(data->wda_wq); + kfree(data); +} + +int wda_set_powersave_mode(void *wda_data, uint8_t enable) +{ + trace_wda_set_powersave_mode(enable); + return wda_set_powersave_mode_req(wda_data, enable); +} diff --git a/datarmnet_dlkm_vendor_board.mk b/datarmnet_dlkm_vendor_board.mk new file mode 100644 index 0000000000..303c6b8186 --- /dev/null +++ b/datarmnet_dlkm_vendor_board.mk @@ -0,0 +1,8 @@ +#Build rmnet core +DATA_DLKM_BOARD_PLATFORMS_LIST := lahaina +ifneq ($(TARGET_BOARD_AUTO),true) +ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true) +BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_core.ko +endif +endif +
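
Editor's note, illustrative only (not part of the patch): the WDA pieces above expose a small client API. wda_qmi_client_init() starts the QMI service lookup and parks the client on qmi->wda_pending; wda_svc_config() promotes it to qmi->wda_client once the WDA service arrives over QRTR; wda_set_powersave_mode() and wda_qmi_client_exit() drive and tear down the connected client. The sketch below shows how a caller might toggle powersave once connected. It assumes that qmi_rmnet_i.h declares struct qmi_info (with the wda_client/wda_pending members used in wda_svc_config()) and the WDA prototypes; in the driver itself this sequencing is owned by qmi_rmnet.c, not by out-of-tree callers.

/* Illustrative sketch only -- not part of the patch. Assumes qmi_rmnet_i.h
 * declares struct qmi_info and the wda_* prototypes used below; in the
 * real driver qmi_rmnet.c performs this call, not external code.
 */
#include <linux/errno.h>
#include "qmi_rmnet_i.h"

static int example_wda_powersave(struct qmi_info *qmi, uint8_t enable)
{
	/* wda_client stays NULL until wda_svc_config() has run, i.e. until
	 * the WDA QMI service has actually arrived and been configured.
	 */
	if (!qmi || !qmi->wda_client)
		return -EINVAL;

	/* 1 = enter powersave, 0 = leave it; traced via wda_set_powersave_mode. */
	return wda_set_powersave_mode(qmi->wda_client, enable);
}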