
Add 'qcom/opensource/datarmnet/' from commit 'c5d70f15d54c8489bd452d52729bcfcd301ab985'

git-subtree-dir: qcom/opensource/datarmnet
git-subtree-mainline: 235599e5f6f2317a76b773ce699fc61a50c1eae1
git-subtree-split: c5d70f15d54c8489bd452d52729bcfcd301ab985
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/qcom/opensource/datarmnet
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
David Wronek, 5 months ago
commit 16648ad2df
50 changed files with 16756 additions and 0 deletions
  1. qcom/opensource/datarmnet/BUILD.bazel (+36, -0)
  2. qcom/opensource/datarmnet/core/Android.mk (+44, -0)
  3. qcom/opensource/datarmnet/core/Kbuild (+36, -0)
  4. qcom/opensource/datarmnet/core/Kconfig (+29, -0)
  5. qcom/opensource/datarmnet/core/Makefile (+50, -0)
  6. qcom/opensource/datarmnet/core/Makefile.am (+34, -0)
  7. qcom/opensource/datarmnet/core/dfc.h (+385, -0)
  8. qcom/opensource/datarmnet/core/dfc_defs.h (+101, -0)
  9. qcom/opensource/datarmnet/core/dfc_qmap.c (+564, -0)
  10. qcom/opensource/datarmnet/core/dfc_qmi.c (+1592, -0)
  11. qcom/opensource/datarmnet/core/qmi_rmnet.c (+1512, -0)
  12. qcom/opensource/datarmnet/core/qmi_rmnet.h (+180, -0)
  13. qcom/opensource/datarmnet/core/qmi_rmnet_i.h (+300, -0)
  14. qcom/opensource/datarmnet/core/rmnet_config.c (+891, -0)
  15. qcom/opensource/datarmnet/core/rmnet_config.h (+257, -0)
  16. qcom/opensource/datarmnet/core/rmnet_ctl.h (+42, -0)
  17. qcom/opensource/datarmnet/core/rmnet_ctl_client.c (+242, -0)
  18. qcom/opensource/datarmnet/core/rmnet_ctl_client.h (+43, -0)
  19. qcom/opensource/datarmnet/core/rmnet_ctl_ipa.c (+113, -0)
  20. qcom/opensource/datarmnet/core/rmnet_ctl_mhi.c (+222, -0)
  21. qcom/opensource/datarmnet/core/rmnet_descriptor.c (+2061, -0)
  22. qcom/opensource/datarmnet/core/rmnet_descriptor.h (+119, -0)
  23. qcom/opensource/datarmnet/core/rmnet_genl.c (+463, -0)
  24. qcom/opensource/datarmnet/core/rmnet_genl.h (+107, -0)
  25. qcom/opensource/datarmnet/core/rmnet_handlers.c (+551, -0)
  26. qcom/opensource/datarmnet/core/rmnet_handlers.h (+37, -0)
  27. qcom/opensource/datarmnet/core/rmnet_hook.h (+159, -0)
  28. qcom/opensource/datarmnet/core/rmnet_ll.c (+173, -0)
  29. qcom/opensource/datarmnet/core/rmnet_ll.h (+45, -0)
  30. qcom/opensource/datarmnet/core/rmnet_ll_core.h (+69, -0)
  31. qcom/opensource/datarmnet/core/rmnet_ll_ipa.c (+222, -0)
  32. qcom/opensource/datarmnet/core/rmnet_ll_mhi.c (+239, -0)
  33. qcom/opensource/datarmnet/core/rmnet_ll_qmap.c (+542, -0)
  34. qcom/opensource/datarmnet/core/rmnet_map.h (+331, -0)
  35. qcom/opensource/datarmnet/core/rmnet_map_command.c (+465, -0)
  36. qcom/opensource/datarmnet/core/rmnet_map_data.c (+1720, -0)
  37. qcom/opensource/datarmnet/core/rmnet_module.c (+100, -0)
  38. qcom/opensource/datarmnet/core/rmnet_module.h (+137, -0)
  39. qcom/opensource/datarmnet/core/rmnet_private.h (+70, -0)
  40. qcom/opensource/datarmnet/core/rmnet_qmap.c (+162, -0)
  41. qcom/opensource/datarmnet/core/rmnet_qmap.h (+68, -0)
  42. qcom/opensource/datarmnet/core/rmnet_qmi.h (+101, -0)
  43. qcom/opensource/datarmnet/core/rmnet_trace.h (+496, -0)
  44. qcom/opensource/datarmnet/core/rmnet_vnd.c (+811, -0)
  45. qcom/opensource/datarmnet/core/rmnet_vnd.h (+31, -0)
  46. qcom/opensource/datarmnet/core/wda.h (+92, -0)
  47. qcom/opensource/datarmnet/core/wda_qmi.c (+582, -0)
  48. qcom/opensource/datarmnet/datarmnet_dlkm_vendor_board.mk (+25, -0)
  49. qcom/opensource/datarmnet/datarmnet_dlkm_vendor_product.mk (+2, -0)
  50. qcom/opensource/datarmnet/define_modules.bzl (+103, -0)

+ 36 - 0
qcom/opensource/datarmnet/BUILD.bazel

@@ -0,0 +1,36 @@
+load(":define_modules.bzl", "define_modules")
+load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
+
+define_modules("pineapple", "consolidate")
+
+define_modules("pineapple", "gki")
+
+define_modules("blair", "consolidate")
+
+define_modules("blair", "gki")
+
+define_modules("monaco", "consolidate")
+
+define_modules("monaco", "gki")
+
+define_modules("pitti", "consolidate")
+
+define_modules("pitti", "gki")
+
+define_modules("volcano", "consolidate")
+
+define_modules("volcano", "gki")
+
+package(
+    default_visibility = [
+        "//visibility:public",
+    ],
+)
+
+ddk_headers(
+    name = "rmnet_core_headers",
+    hdrs = glob([
+        "core/*.h",
+    ]),
+    includes = ["core"],
+)

+ 44 - 0
qcom/opensource/datarmnet/core/Android.mk

@@ -0,0 +1,44 @@
+ifeq ($(TARGET_DATARMNET_ENABLE), true)
+ifneq ($(TARGET_BOARD_PLATFORM),qssi)
+RMNET_CORE_DLKM_PLATFORMS_LIST := pineapple
+RMNET_CORE_DLKM_PLATFORMS_LIST += blair
+RMNET_CORE_DLKM_PLATFORMS_LIST += monaco
+RMNET_CORE_DLKM_PLATFORMS_LIST += pitti
+RMNET_CORE_DLKM_PLATFORMS_LIST += volcano
+
+ifeq ($(call is-board-platform-in-list, $(RMNET_CORE_DLKM_PLATFORMS_LIST)),true)
+#Make file to create RMNET_CORE DLKM
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+BOARD_COMMON_DIR ?= device/qcom/common
+
+#Enabling BAZEL
+LOCAL_MODULE_DDK_BUILD := true
+
+LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
+LOCAL_CLANG :=true
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+LOCAL_MODULE := rmnet_core.ko
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+KBUILD_REQUIRED_KOS := ipam.ko
+DLKM_DIR := $(TOP)/$(BOARD_COMMON_DIR)/dlkm
+$(warning $(DLKM_DIR))
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+######## Create RMNET_CTL DLKM ########
+include $(CLEAR_VARS)
+
+LOCAL_CFLAGS := -Wno-macro-redefined -Wno-unused-function -Wall -Werror
+LOCAL_CLANG :=true
+LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
+LOCAL_MODULE := rmnet_ctl.ko
+LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
+KBUILD_REQUIRED_KOS := ipam.ko
+DLKM_DIR := $(TOP)/$(BOARD_COMMON_DIR)/dlkm
+$(warning $(DLKM_DIR))
+include $(DLKM_DIR)/Build_external_kernelmodule.mk
+
+endif #End of Check for target
+endif #End of Check for qssi target
+endif #End of Check for datarmnet

+ 36 - 0
qcom/opensource/datarmnet/core/Kbuild

@@ -0,0 +1,36 @@
+ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_MONACO) $(CONFIG_ARCH_PITTI)))
+ccflags-y	+= -DRMNET_LA_PLATFORM
+endif
+
+obj-m += rmnet_core.o
+
+#core sources
+rmnet_core-y := \
+	rmnet_config.o \
+	rmnet_handlers.o \
+	rmnet_descriptor.o \
+	rmnet_genl.o \
+	rmnet_map_command.o \
+	rmnet_map_data.o \
+	rmnet_module.o \
+	rmnet_vnd.o
+
+rmnet_core-y += \
+	rmnet_ll.o \
+	rmnet_ll_ipa.o
+
+#DFC sources
+rmnet_core-y += \
+	qmi_rmnet.o \
+	wda_qmi.o \
+	dfc_qmi.o \
+	dfc_qmap.o \
+	rmnet_qmap.o \
+	rmnet_ll_qmap.o
+
+ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_PITTI)))
+obj-m += rmnet_ctl.o
+rmnet_ctl-y := \
+	rmnet_ctl_client.o \
+	rmnet_ctl_ipa.o
+endif

+ 29 - 0
qcom/opensource/datarmnet/core/Kconfig

@@ -0,0 +1,29 @@
+#
+# RMNET MAP driver
+#
+
+config RMNET_CORE
+	default m
+	depends on RMNET_MEM
+	select GRO_CELLS
+	help
+	  If you select this, you will enable the RMNET module which is used
+	  for handling data in the multiplexing and aggregation protocol (MAP)
+	  format in the embedded data path. RMNET devices can be attached to
+	  any IP mode physical device.
+
+config RMNET_CTL
+	default m
+	help
+	  Enable the RMNET CTL module which is used for handling QMAP commands
+	  for flow control purposes.
+
+config RMNET_LA_PLATFORM
+	default y
+	bool "RMNET platform support"
+	depends on ARCH_PINEAPPLE || ARCH_BLAIR || ARCH_MONACO || ARCH_PITTI
+	help
+	  Enable the functionality gated by the RMNET_LA_PLATFORM configuration
+	  in rmnet driver.
+	  This should be automatically set based on the target used for
+	  compilation.

+ 50 - 0
qcom/opensource/datarmnet/core/Makefile

@@ -0,0 +1,50 @@
+ifeq ($(RELEASE_PACKAGE),1)
+EXTRA_CFLAGS+=-DRELEASE_PACKAGE
+endif
+LBITS ?= $(shell getconf LONG_BIT)
+ifeq ($(LBITS),64)
+CCFLAGS += -m64
+EXTRA_CFLAGS+=-DSYSTEM_IS_64
+else
+CCFLAGS += -m32
+endif
+
+M ?= $(shell pwd)
+#obj-m := rmnet_core.o rmnet_ctl.o
+
+rmnet_core-y += 	rmnet_config.o \
+			rmnet_descriptor.o \
+			rmnet_genl.o \
+			rmnet_handlers.o \
+			rmnet_map_command.o \
+			rmnet_map_data.o \
+			rmnet_module.o \
+			rmnet_vnd.o \
+			dfc_qmap.o \
+			dfc_qmi.o \
+			qmi_rmnet.o \
+			wda_qmi.o \
+			rmnet_ll.o \
+			rmnet_ll_ipa.o \
+			rmnet_qmap.o \
+			rmnet_ll_qmap.o
+
+ifneq (, $(filter y, $(CONFIG_ARCH_PINEAPPLE)))
+rmnet_ctl-y += 		rmnet_ctl_client.o \
+			rmnet_ctl_ipa.o \
+			rmnet_ctl_mhi.o
+endif
+
+KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
+
+KBUILD_OPTIONS := RMNET_CORE_ROOT=$(PWD)
+KBUILD_OPTIONS += MODNAME?=rmnet_core
+
+all:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
+
+modules_install:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) modules_install
+
+clean:
+	$(MAKE) -C $(KERNEL_SRC) M=$(M) clean

+ 34 - 0
qcom/opensource/datarmnet/core/Makefile.am

@@ -0,0 +1,34 @@
+ifeq ($(RELEASE_PACKAGE),1)
+EXTRA_CFLAGS+=-DRELEASE_PACKAGE
+endif
+LBITS := $(shell getconf LONG_BIT)
+ifeq ($(LBITS),64)
+CCFLAGS += -m64
+EXTRA_CFLAGS+=-DSYSTEM_IS_64
+else
+CCFLAGS += -m32
+endif
+
+rmnet_core_dir	        = $(prefix)/rmnet_core
+rmnet_core_CFLAGS	= -Werror
+
+KERNEL_FLAGS ?= ARCH=arm
+
+module = rmnet_core.ko
+kmake  = $(MAKE) $(KERNEL_FLAGS) -C $(KERNEL_DIR) M=$(CURDIR)
+
+$(module):
+	$(kmake) modules
+
+all-local: $(module)
+
+install-exec-local: $(module)
+	$(kmake) INSTALL_MOD_PATH=$(DESTDIR)$(prefix)/modules modules_install
+
+# "make distclean" will always run clean-local in this directory,
+# regardless of the KERNELMODULES conditional. Therefore, ensure
+# KERNEL_DIR exists before running clean. Further, don't fail even
+# if there is a problem.
+clean-local:
+	-test ! -d "$(KERNEL_DIR)" || $(kmake) clean
+

+ 385 - 0
qcom/opensource/datarmnet/core/dfc.h

@@ -0,0 +1,385 @@
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/version.h>
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dfc
+#undef TRACE_INCLUDE_PATH
+
+#ifndef RMNET_TRACE_INCLUDE_PATH
+#if defined(CONFIG_RMNET_LA_PLATFORM)
+#ifdef CONFIG_ARCH_KHAJE
+#define RMNET_TRACE_INCLUDE_PATH ../../../../../vendor/qcom/opensource/datarmnet/core
+#else
+#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
+#endif /* CONFIG_ARCH_KHAJE */
+#elif defined(__arch_um__)
+#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
+#else
+#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
+#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
+#endif /* RMNET_TRACE_INCLUDE_PATH */
+
+#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_FILE dfc
+
+#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DFC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(dfc_qmi_tc,
+
+	TP_PROTO(const char *name, u32 txq, int enable),
+
+	TP_ARGS(name, txq, enable),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u32, txq)
+		__field(int, enable)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name);
+		__entry->txq = txq;
+		__entry->enable = enable;
+	),
+
+	TP_printk("dev=%s txq=%u %s",
+		__get_str(dev_name),
+		__entry->txq,
+		__entry->enable ? "enable" : "disable")
+);
+
+TRACE_EVENT(dfc_flow_ind,
+
+	TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
+		 u16 seq_num, u8 ack_req, u32 ancillary),
+
+	TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req,
+		ancillary),
+
+	TP_STRUCT__entry(
+		__field(int, src)
+		__field(int, idx)
+		__field(u8, mid)
+		__field(u8, bid)
+		__field(u32, grant)
+		__field(u16, seq)
+		__field(u8, ack_req)
+		__field(u32, ancillary)
+	),
+
+	TP_fast_assign(
+		__entry->src = src;
+		__entry->idx = idx;
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+		__entry->grant = grant;
+		__entry->seq = seq_num;
+		__entry->ack_req = ack_req;
+		__entry->ancillary = ancillary;
+	),
+
+	TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u",
+		__entry->src, __entry->idx, __entry->mid, __entry->bid,
+		__entry->grant, __entry->seq, __entry->ack_req,
+		__entry->ancillary)
+);
+
+TRACE_EVENT(dfc_flow_check,
+
+	TP_PROTO(const char *name, u8 bearer_id, unsigned int len,
+		 u32 mark, u32 grant),
+
+	TP_ARGS(name, bearer_id, len, mark, grant),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u8, bearer_id)
+		__field(unsigned int, len)
+		__field(u32, mark)
+		__field(u32, grant)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name)
+		__entry->bearer_id = bearer_id;
+		__entry->len = len;
+		__entry->mark = mark;
+		__entry->grant = grant;
+	),
+
+	TP_printk("dev=%s bearer_id=%u skb_len=%u mark=%u current_grant=%u",
+		__get_str(dev_name), __entry->bearer_id,
+		__entry->len, __entry->mark, __entry->grant)
+);
+
+TRACE_EVENT(dfc_flow_info,
+
+	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type,
+		 u32 handle, int add),
+
+	TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u8, bid)
+		__field(u32, fid)
+		__field(int, ip)
+		__field(u32, handle)
+		__field(int, action)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name)
+		__entry->bid = bearer_id;
+		__entry->fid = flow_id;
+		__entry->ip = ip_type;
+		__entry->handle = handle;
+		__entry->action = add;
+	),
+
+	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d txq=%d",
+		__entry->action ? "add flow" : "delete flow",
+		__get_str(dev_name),
+		__entry->bid, __entry->fid, __entry->ip, __entry->handle)
+);
+
+TRACE_EVENT(dfc_client_state_up,
+
+	TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface),
+
+	TP_ARGS(idx, instance, ep_type, iface),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(u32, instance)
+		__field(u32, ep_type)
+		__field(u32, iface)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->instance = instance;
+		__entry->ep_type = ep_type;
+		__entry->iface = iface;
+	),
+
+	TP_printk("DFC Client[%d] connect: instance=%u ep_type=%u iface_id=%u",
+		__entry->idx, __entry->instance,
+		__entry->ep_type, __entry->iface)
+);
+
+TRACE_EVENT(dfc_client_state_down,
+
+	TP_PROTO(int idx, int from_cb),
+
+	TP_ARGS(idx, from_cb),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(int, from_cb)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->from_cb = from_cb;
+	),
+
+	TP_printk("DFC Client[%d] exit: callback %d",
+		  __entry->idx, __entry->from_cb)
+);
+
+TRACE_EVENT(dfc_qmap_cmd,
+
+	TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran),
+
+	TP_ARGS(mux_id, bearer_id, seq_num, type, tran),
+
+	TP_STRUCT__entry(
+		__field(u8, mid)
+		__field(u8, bid)
+		__field(u16, seq)
+		__field(u8, type)
+		__field(u32, tran)
+	),
+
+	TP_fast_assign(
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+		__entry->seq = seq_num;
+		__entry->type = type;
+		__entry->tran = tran;
+	),
+
+	TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u",
+		__entry->mid, __entry->bid, __entry->seq,
+		__entry->type, __entry->tran)
+);
+
+TRACE_EVENT(dfc_tx_link_status_ind,
+
+	TP_PROTO(int src, int idx, u8 status, u8 mux_id, u8 bearer_id),
+
+	TP_ARGS(src, idx, status, mux_id, bearer_id),
+
+	TP_STRUCT__entry(
+		__field(int, src)
+		__field(int, idx)
+		__field(u8, status)
+		__field(u8, mid)
+		__field(u8, bid)
+	),
+
+	TP_fast_assign(
+		__entry->src = src;
+		__entry->idx = idx;
+		__entry->status = status;
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+	),
+
+	TP_printk("src=%d [%d]: status=%u mux_id=%u bearer_id=%u",
+		__entry->src, __entry->idx, __entry->status,
+		__entry->mid, __entry->bid)
+);
+
+TRACE_EVENT(dfc_qmap,
+
+	TP_PROTO(const void *data, size_t len, bool in, u8 chn),
+
+	TP_ARGS(data, len, in, chn),
+
+	TP_STRUCT__entry(
+		__field(bool, in)
+		__field(size_t, len)
+		__dynamic_array(u8, data, len)
+		__field(u8, chn)
+	),
+
+	TP_fast_assign(
+		__entry->in = in;
+		__entry->len = len;
+		memcpy(__get_dynamic_array(data), data, len);
+		__entry->chn = chn;
+	),
+
+	TP_printk("[0x%02x] %s %s", __entry->chn,
+		__entry->in ? "<--" : "-->",
+		__print_array(__get_dynamic_array(data), __entry->len,
+			      sizeof(u8)))
+);
+
+TRACE_EVENT(dfc_adjust_grant,
+
+	TP_PROTO(u8 mux_id, u8 bearer_id, u32 grant, u32 rx_bytes,
+		 u32 inflight, u32 a_grant),
+
+	TP_ARGS(mux_id, bearer_id, grant, rx_bytes, inflight, a_grant),
+
+	TP_STRUCT__entry(
+		__field(u8, mux_id)
+		__field(u8, bearer_id)
+		__field(u32, grant)
+		__field(u32, rx_bytes)
+		__field(u32, inflight)
+		__field(u32, a_grant)
+	),
+
+	TP_fast_assign(
+		__entry->mux_id = mux_id;
+		__entry->bearer_id = bearer_id;
+		__entry->grant = grant;
+		__entry->rx_bytes = rx_bytes;
+		__entry->inflight = inflight;
+		__entry->a_grant = a_grant;
+	),
+
+	TP_printk("mid=%u bid=%u grant=%u rx=%u inflight=%u adjusted_grant=%u",
+		__entry->mux_id, __entry->bearer_id, __entry->grant,
+		__entry->rx_bytes, __entry->inflight, __entry->a_grant)
+);
+
+TRACE_EVENT(dfc_watchdog,
+
+	TP_PROTO(u8 mux_id, u8 bearer_id, u8 event),
+
+	TP_ARGS(mux_id, bearer_id, event),
+
+	TP_STRUCT__entry(
+		__field(u8, mux_id)
+		__field(u8, bearer_id)
+		__field(u8, event)
+	),
+
+	TP_fast_assign(
+		__entry->mux_id = mux_id;
+		__entry->bearer_id = bearer_id;
+		__entry->event = event;
+	),
+
+	TP_printk("mid=%u bid=%u event=%u",
+		__entry->mux_id, __entry->bearer_id, __entry->event)
+);
+
+TRACE_EVENT(dfc_ll_switch,
+
+	TP_PROTO(const char *cmd, u8 type, u8 num_bearer, void *bearers),
+
+	TP_ARGS(cmd, type, num_bearer, bearers),
+
+	TP_STRUCT__entry(
+		__string(cmd_str, cmd)
+		__field(u8, type)
+		__field(u8, num_bearer)
+		__dynamic_array(u8, bearers, num_bearer)
+	),
+
+	TP_fast_assign(
+		__assign_str(cmd_str, cmd)
+		__entry->type = type;
+		__entry->num_bearer = num_bearer;
+		memcpy(__get_dynamic_array(bearers), bearers, num_bearer);
+	),
+
+	TP_printk("%s type=%u num_bearer=%u bearers={%s}",
+		__get_str(cmd_str),
+		__entry->type,
+		__entry->num_bearer,
+		__print_array(__get_dynamic_array(bearers),
+			      __entry->num_bearer, 1))
+);
+
+TRACE_EVENT(dfc_set_powersave_mode,
+
+	TP_PROTO(int enable),
+
+	TP_ARGS(enable),
+
+	TP_STRUCT__entry(
+		__field(int, enable)
+	),
+
+	TP_fast_assign(
+		__entry->enable = enable;
+	),
+
+	TP_printk("set powersave mode to %s",
+		__entry->enable ? "enable" : "disable")
+);
+
+#endif /* _TRACE_DFC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
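
Each TRACE_EVENT() above expands into a trace_<name>() helper once dfc.h is included (dfc_qmi.c, later in this commit, defines CREATE_TRACE_POINTS before its include). A minimal sketch of a caller, not part of the commit; the function and variable names are illustrative only:

	#include <linux/netdevice.h>
	#include "dfc.h"

	/* Illustrative caller: emits two of the tracepoints defined above. */
	static void example_emit_dfc_events(struct net_device *dev, u32 txq, int enable)
	{
		/* matches TP_PROTO(const char *name, u32 txq, int enable) */
		trace_dfc_qmi_tc(dev->name, txq, enable);

		/* matches TP_PROTO(int enable) */
		trace_dfc_set_powersave_mode(enable);
	}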

+ 101 - 0
qcom/opensource/datarmnet/core/dfc_defs.h

@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DFC_DEFS_H
+#define _DFC_DEFS_H
+
+#include <linux/soc/qcom/qmi.h>
+#include "qmi_rmnet_i.h"
+
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
+#define DFC_MAX_QOS_ID_V01 2
+
+struct dfc_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *dfc_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	struct work_struct qmi_ind_work;
+	struct list_head qmi_ind_q;
+	spinlock_t qmi_ind_lock;
+	int index;
+	int restart_state;
+};
+
+enum dfc_ip_type_enum_v01 {
+	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	DFC_IPV4_TYPE_V01 = 0x4,
+	DFC_IPV6_TYPE_V01 = 0x6,
+	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct dfc_qos_id_type_v01 {
+	u32 qos_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_flow_status_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 num_bytes;
+	u16 seq_num;
+	u8 qos_ids_len;
+	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+	u8 rx_bytes_valid;
+	u32 rx_bytes;
+	u8 ll_status;
+};
+
+struct dfc_ancillary_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 reserved;
+};
+
+struct dfc_flow_status_ind_msg_v01 {
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+	u8 eod_ack_reqd_valid;
+	u8 eod_ack_reqd;
+	u8 ancillary_info_valid;
+	u8 ancillary_info_len;
+	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_bearer_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_tx_link_status_ind_msg_v01 {
+	u8 tx_status;
+	u8 bearer_info_valid;
+	u8 bearer_info_len;
+	struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
+};
+
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind,
+			       bool is_query);
+
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind);
+
+#endif /* _DFC_DEFS_H */
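
dfc_defs.h fixes the contract between a transport front end and the DFC core: populate a dfc_flow_status_ind_msg_v01 (or dfc_tx_link_status_ind_msg_v01) and hand it to dfc_do_burst_flow_control() or dfc_handle_tx_link_status_ind(). A hedged, standalone sketch with placeholder field values — dfc_qmap.c below does this for real when a QMAP DFC indication arrives:

	#include <linux/string.h>
	#include "dfc_defs.h"

	/* Illustrative only: report a 64 KB grant for one bearer to the DFC core. */
	static void example_report_grant(struct dfc_qmi_data *dfc)
	{
		/* kept off the stack, mirroring the driver's own static buffers */
		static struct dfc_flow_status_ind_msg_v01 ind;

		memset(&ind, 0, sizeof(ind));
		ind.flow_status_valid = 1;
		ind.flow_status_len = 1;
		ind.flow_status[0].mux_id = 1;		/* placeholder identifiers */
		ind.flow_status[0].bearer_id = 5;
		ind.flow_status[0].num_bytes = 65536;	/* grant, in bytes */
		ind.flow_status[0].seq_num = 0;

		dfc_do_burst_flow_control(dfc, &ind, false);	/* false: not a query response */
	}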

+ 564 - 0
qcom/opensource/datarmnet/core/dfc_qmap.c

@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/pkt_sched.h>
+#include <linux/module.h>
+#include "rmnet_qmap.h"
+#include "dfc_defs.h"
+#include "rmnet_qmi.h"
+#include "qmi_rmnet.h"
+#include "dfc.h"
+#include "rmnet_map.h"
+
+#define QMAP_DFC_VER		1
+
+struct qmap_dfc_config {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			cmd_id;
+	u8			reserved;
+	u8			tx_info:1;
+	u8			reserved2:7;
+	__be32			ep_type;
+	__be32			iface_id;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_ind {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	__be16			seq_num;
+	u8			reserved2;
+	u8			tx_info_valid:1;
+	u8			tx_info:1;
+	u8			rx_bytes_valid:1;
+	u8			reserved3:5;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			bearer_status:3;
+	u8			ll_status:1;
+	u8			reserved4:3;
+	__be32			grant;
+	__be32			rx_bytes;
+	u32			reserved6;
+} __aligned(1);
+
+struct qmap_dfc_query {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u32			reserved3;
+} __aligned(1);
+
+struct qmap_dfc_query_resp {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			bearer_id;
+	u8			tcp_bidir:1;
+	u8			rx_bytes_valid:1;
+	u8			reserved:6;
+	u8			invalid:1;
+	u8			reserved2:7;
+	__be32			grant;
+	__be32			rx_bytes;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_req {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_cnf {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmapv5_cmd_hdr {
+	u8	pad_len:6;
+	u8	next_hdr:1;
+	u8	cd_bit:1;
+	u8	mux_id;
+	__be16	pkt_len;
+	struct rmnet_map_v5_csum_header csum_hdr;
+	u8	cmd_name;
+	u8	cmd_type:2;
+	u8	reserved:6;
+	u16	reserved2;
+	__be32	tx_id;
+} __aligned(1);
+
+struct qmapv5_dfc_end_marker_cnf {
+	struct qmapv5_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			bearer_id;
+	u8			reserved2;
+	u16			reserved3;
+	__be16			seq_num;
+	u32			reserved4;
+} __aligned(1);
+
+struct qmap_dfc_powersave_req {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			allow:1;
+	u8			autoshut:1;
+	u8			reserved:6;
+	u8			reserved2;
+	u8			mode:1;
+	u8			reserved3:7;
+	__be32			ep_type;
+	__be32			iface_id;
+	u8			num_bearers;
+	u8			bearer_id[PS_MAX_BEARERS];
+	u8			reserved4[3];
+} __aligned(1);
+
+static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
+static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
+static struct dfc_qmi_data __rcu *qmap_dfc_data;
+static bool dfc_config_acked;
+
+static void dfc_qmap_send_config(struct dfc_qmi_data *data);
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 struct rmnet_bearer_map *bearer,
+					 u16 seq, u32 tx_id);
+
+static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
+			       struct sk_buff *skb)
+{
+	struct qmap_dfc_ind *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_ind))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_ind *)skb->data;
+
+	if (cmd->tx_info_valid) {
+		memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
+		qmap_tx_ind.tx_status = cmd->tx_info;
+		qmap_tx_ind.bearer_info_valid = 1;
+		qmap_tx_ind.bearer_info_len = 1;
+		qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;
+
+		dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);
+
+		/* Ignore grant since it is always 0 */
+		goto done;
+	}
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
+	qmap_flow_ind.flow_status[0].ll_status = cmd->ll_status;
+
+	if (cmd->rx_bytes_valid) {
+		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
+		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
+	}
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, false);
+
+done:
+	return QMAP_CMD_ACK;
+}
+
+static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
+				      struct sk_buff *skb)
+{
+	struct qmap_dfc_query_resp *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_query_resp))
+		return QMAP_CMD_DONE;
+
+	cmd = (struct qmap_dfc_query_resp *)skb->data;
+
+	if (cmd->invalid)
+		return QMAP_CMD_DONE;
+
+	memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+	qmap_flow_ind.flow_status_valid = 1;
+	qmap_flow_ind.flow_status_len = 1;
+
+	qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+	qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+	qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+	qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
+
+	if (cmd->rx_bytes_valid) {
+		qmap_flow_ind.flow_status[0].rx_bytes_valid = 1;
+		qmap_flow_ind.flow_status[0].rx_bytes = ntohl(cmd->rx_bytes);
+	}
+
+	if (cmd->tcp_bidir) {
+		qmap_flow_ind.ancillary_info_valid = 1;
+		qmap_flow_ind.ancillary_info_len = 1;
+		qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+		qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+		qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+	}
+
+	dfc_do_burst_flow_control(dfc, &qmap_flow_ind, true);
+
+	return QMAP_CMD_DONE;
+}
+
+static int dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
+				   u8 bearer_id, u16 seq_num, u32 tx_id)
+{
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	int rc = QMAP_CMD_ACK;
+
+	dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
+	if (!dev)
+		return rc;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return rc;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+	if (!bearer) {
+		spin_unlock_bh(&qos->qos_lock);
+		return rc;
+	}
+
+	if (bearer->last_seq == seq_num && bearer->grant_size) {
+		bearer->ack_req = 1;
+		bearer->ack_txid = tx_id;
+	} else {
+		dfc_qmap_send_end_marker_cnf(qos, bearer, seq_num, tx_id);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return QMAP_CMD_DONE;
+}
+
+static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
+					  struct sk_buff *skb)
+{
+	struct qmap_dfc_end_marker_req *cmd;
+
+	if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_dfc_end_marker_req *)skb->data;
+	return dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
+				       ntohs(cmd->seq_num),
+				       ntohl(cmd->hdr.tx_id));
+}
+
+int dfc_qmap_cmd_handler(struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+	struct dfc_qmi_data *dfc;
+	int rc = QMAP_CMD_DONE;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+
+	if (cmd->cmd_name == QMAP_DFC_QUERY) {
+		if (cmd->cmd_type != QMAP_CMD_ACK)
+			return rc;
+	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
+		if (cmd->cmd_type == QMAP_CMD_ACK &&
+		    cmd->cmd_name == QMAP_DFC_CONFIG)
+			dfc_config_acked = true;
+		return rc;
+	}
+
+	dfc = rcu_dereference(qmap_dfc_data);
+	if (!dfc || READ_ONCE(dfc->restart_state))
+		return rc;
+
+	/* Re-send DFC config once if needed */
+	if (unlikely(!dfc_config_acked)) {
+		dfc_qmap_send_config(dfc);
+		dfc_config_acked = true;
+	}
+
+	switch (cmd->cmd_name) {
+	case QMAP_DFC_IND:
+		rc = dfc_qmap_handle_ind(dfc, skb);
+		qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
+		break;
+
+	case QMAP_DFC_QUERY:
+		rc = dfc_qmap_handle_query_resp(dfc, skb);
+		break;
+
+	case QMAP_DFC_END_MARKER:
+		rc = dfc_qmap_handle_end_marker_req(dfc, skb);
+		break;
+
+	default:
+		if (cmd->cmd_type == QMAP_CMD_REQUEST)
+			rc = QMAP_CMD_UNSUPPORTED;
+	}
+
+	return rc;
+}
+
+static void dfc_qmap_send_config(struct dfc_qmi_data *data)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_config *dfc_config;
+	unsigned int len = sizeof(struct qmap_dfc_config);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
+	memset(dfc_config, 0, len);
+
+	dfc_config->hdr.cd_bit = 1;
+	dfc_config->hdr.mux_id = 0;
+	dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
+	dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_config->hdr.tx_id = htonl(rmnet_qmap_next_txid());
+
+	dfc_config->cmd_ver = QMAP_DFC_VER;
+	dfc_config->cmd_id = QMAP_DFC_IND;
+	dfc_config->tx_info = 1;
+	dfc_config->ep_type = htonl(data->svc.ep_type);
+	dfc_config->iface_id = htonl(data->svc.iface_id);
+
+	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
+}
+
+static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_query *dfc_query;
+	unsigned int len = sizeof(struct qmap_dfc_query);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
+	memset(dfc_query, 0, len);
+
+	dfc_query->hdr.cd_bit = 1;
+	dfc_query->hdr.mux_id = mux_id;
+	dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
+	dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_query->hdr.tx_id = htonl(rmnet_qmap_next_txid());
+
+	dfc_query->cmd_ver = QMAP_DFC_VER;
+	dfc_query->bearer_id = bearer_id;
+
+	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
+}
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+					 struct rmnet_bearer_map *bearer,
+					 u16 seq, u32 tx_id)
+{
+	struct sk_buff *skb;
+	struct qmapv5_dfc_end_marker_cnf *em_cnf;
+	unsigned int len = sizeof(struct qmapv5_dfc_end_marker_cnf);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	em_cnf = (struct qmapv5_dfc_end_marker_cnf *)skb_put(skb, len);
+	memset(em_cnf, 0, len);
+
+	em_cnf->hdr.cd_bit = 1;
+	em_cnf->hdr.next_hdr = 1;
+	em_cnf->hdr.mux_id = qos->mux_id;
+	em_cnf->hdr.pkt_len = htons(len -
+				    (QMAP_HDR_LEN +
+				     sizeof(struct rmnet_map_v5_csum_header)));
+	em_cnf->hdr.csum_hdr.header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+	em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
+	em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
+	em_cnf->hdr.tx_id = htonl(tx_id);
+
+	em_cnf->cmd_ver = QMAP_DFC_VER;
+	em_cnf->bearer_id = bearer->bearer_id;
+	em_cnf->seq_num = htons(seq);
+
+	rmnet_qmap_send(skb, bearer->ch_switch.current_ch, true);
+}
+
+static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
+{
+	struct sk_buff *skb;
+	struct qmap_dfc_powersave_req *dfc_powersave;
+	unsigned int len = sizeof(struct qmap_dfc_powersave_req);
+	struct dfc_qmi_data *dfc;
+	u32 ep_type = 0;
+	u32 iface_id = 0;
+
+	rcu_read_lock();
+	dfc = rcu_dereference(qmap_dfc_data);
+	if (dfc) {
+		ep_type = dfc->svc.ep_type;
+		iface_id = dfc->svc.iface_id;
+	} else {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb->protocol = htons(ETH_P_MAP);
+	dfc_powersave = (struct qmap_dfc_powersave_req *)skb_put(skb, len);
+	memset(dfc_powersave, 0, len);
+
+	dfc_powersave->hdr.cd_bit = 1;
+	dfc_powersave->hdr.mux_id = 0;
+	dfc_powersave->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	dfc_powersave->hdr.cmd_name = QMAP_DFC_POWERSAVE;
+	dfc_powersave->hdr.cmd_type = QMAP_CMD_REQUEST;
+	dfc_powersave->hdr.tx_id =  htonl(rmnet_qmap_next_txid());
+
+	dfc_powersave->cmd_ver = 3;
+	dfc_powersave->mode = enable ? 1 : 0;
+
+	if (enable && num_bearers) {
+		if (unlikely(num_bearers > PS_MAX_BEARERS))
+			num_bearers = PS_MAX_BEARERS;
+		dfc_powersave->allow = 1;
+		dfc_powersave->autoshut = 1;
+		dfc_powersave->num_bearers = num_bearers;
+		memcpy(dfc_powersave->bearer_id, bearer_id, num_bearers);
+	}
+
+	dfc_powersave->ep_type = htonl(ep_type);
+	dfc_powersave->iface_id = htonl(iface_id);
+
+	return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
+}
+
+int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
+{
+	trace_dfc_set_powersave_mode(enable);
+	return dfc_qmap_send_powersave(enable, num_bearers, bearer_id);
+}
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
+{
+	struct rmnet_bearer_map *bearer;
+
+	if (type == DFC_ACK_TYPE_DISABLE) {
+		bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+		if (bearer)
+			dfc_qmap_send_end_marker_cnf(qos, bearer,
+						     seq, bearer->ack_txid);
+	} else if (type == DFC_ACK_TYPE_THRESHOLD) {
+		dfc_qmap_send_query(qos->mux_id, bearer_id);
+	}
+}
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *data;
+
+	if (!port || !qmi)
+		return -EINVAL;
+
+	/* Prevent double init */
+	data = rcu_dereference(qmap_dfc_data);
+	if (data)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->rmnet_port = port;
+	data->index = index;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	qmi->dfc_clients[index] = (void *)data;
+	rcu_assign_pointer(qmap_dfc_data, data);
+
+	rmnet_qmap_init(port);
+
+	trace_dfc_client_state_up(data->index, data->svc.instance,
+				  data->svc.ep_type, data->svc.iface_id);
+
+	pr_info("DFC QMAP init\n");
+
+	/* Currently if powersave ext is enabled, no need to do dfc config
+	 * which only enables tx_info */
+	if (qmi->ps_ext) {
+		dfc_config_acked = true;
+	} else {
+		dfc_config_acked = false;
+		dfc_qmap_send_config(data);
+	}
+
+	return 0;
+}
+
+void dfc_qmap_client_exit(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+	if (!data) {
+		pr_err("%s() data is null\n", __func__);
+		return;
+	}
+
+	trace_dfc_client_state_down(data->index, 0);
+
+	rmnet_qmap_exit();
+
+	WRITE_ONCE(data->restart_state, 1);
+	RCU_INIT_POINTER(qmap_dfc_data, NULL);
+	synchronize_rcu();
+
+	kfree(data);
+
+	pr_info("DFC QMAP exit\n");
+}
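
For reference, a hedged sketch of the client lifecycle this file exports; the real registration lives elsewhere in the module (qmi_rmnet.c in this commit), and the port, index, and service values below are placeholders, with the prototypes assumed to come from the core headers:

	#include "qmi_rmnet_i.h"

	/* Illustrative lifecycle: bring up the QMAP DFC client, then tear it down. */
	static int example_dfc_qmap_lifecycle(void *rmnet_port, struct qmi_info *qmi)
	{
		struct svc_info svc = {
			.instance = 1,	/* placeholder service identifiers */
			.ep_type  = 0,
			.iface_id = 0,
		};
		int rc;

		rc = dfc_qmap_client_init(rmnet_port, 0, &svc, qmi);
		if (rc)
			return rc;

		/* ... QMAP DFC commands flow through dfc_qmap_cmd_handler() ... */

		dfc_qmap_client_exit(qmi->dfc_clients[0]);
		return 0;
	}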

+ 1592 - 0
qcom/opensource/datarmnet/core/dfc_qmi.c

@@ -0,0 +1,1592 @@
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/pkt_sched.h>
+#include "rmnet_qmi.h"
+#include "qmi_rmnet.h"
+#include "dfc_defs.h"
+
+#define CREATE_TRACE_POINTS
+#include "dfc.h"
+
+struct dfc_qmap_header {
+	u8  pad_len:6;
+	u8  reserved_bit:1;
+	u8  cd_bit:1;
+	u8  mux_id;
+	__be16   pkt_len;
+} __aligned(1);
+
+struct dfc_ack_cmd {
+	struct dfc_qmap_header header;
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+	u8  ver:2;
+	u8  reserved3:6;
+	u8  type:2;
+	u8  reserved4:6;
+	u16 dfc_seq;
+	u8  reserved5[3];
+	u8  bearer_id;
+} __aligned(1);
+
+static void dfc_svc_init(struct work_struct *work);
+extern int dfc_ps_ext;
+
+/* **************************************************** */
+#define DFC_SERVICE_ID_V01 0x4E
+#define DFC_SERVICE_VERS_V01 0x01
+#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)
+
+#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
+#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
+#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN  11
+#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7
+
+#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
+#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
+#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 8
+#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7
+
+#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
+#define QMI_DFC_TX_LINK_STATUS_IND_V01 0x0024
+
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
+
+struct dfc_bind_client_req_msg_v01 {
+	u8 ep_id_valid;
+	struct data_ep_id_type_v01 ep_id;
+};
+
+struct dfc_bind_client_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+struct dfc_indication_register_req_msg_v01 {
+	u8 report_flow_status_valid;
+	u8 report_flow_status;
+	u8 report_tx_link_status_valid;
+	u8 report_tx_link_status;
+};
+
+struct dfc_indication_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct dfc_qos_id_type_v01,
+					   qos_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum dfc_ip_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct dfc_qos_id_type_v01,
+					   ip_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   num_bytes),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   seq_num),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   qos_ids_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_QOS_ID_V01,
+		.elem_size	= sizeof(struct dfc_qos_id_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   qos_ids),
+		.ei_array	= dfc_qos_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   reserved),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct dfc_get_flow_status_req_msg_v01 {
+	u8 bearer_id_list_valid;
+	u8 bearer_id_list_len;
+	u8 bearer_id_list[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_get_flow_status_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_svc_ind {
+	struct list_head list;
+	u16 msg_id;
+	union {
+		struct dfc_flow_status_ind_msg_v01 dfc_info;
+		struct dfc_tx_link_status_ind_msg_v01 tx_status;
+	} d;
+};
+
+static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct dfc_bind_client_req_msg_v01,
+					   ep_id_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct data_ep_id_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct dfc_bind_client_req_msg_v01,
+					   ep_id),
+		.ei_array	= data_ep_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct dfc_bind_client_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_flow_status),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_tx_link_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_tx_link_status),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   dfc_indication_register_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_flow_status_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status),
+		.ei_array	= dfc_flow_status_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   eod_ack_reqd_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   eod_ack_reqd),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_ancillary_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info),
+		.ei_array	= dfc_ancillary_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(u8),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_req_msg_v01,
+					   bearer_id_list),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_flow_status_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_get_flow_status_resp_msg_v01,
+					   flow_status),
+		.ei_array	= dfc_flow_status_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_bearer_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum dfc_ip_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_bearer_info_type_v01,
+					   ip_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_tx_link_status_ind_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   tx_status),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_bearer_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_tx_link_status_ind_msg_v01,
+					   bearer_info),
+		.ei_array	= dfc_bearer_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static int
+dfc_bind_client_req(struct qmi_handle *dfc_handle,
+		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
+{
+	struct dfc_bind_client_resp_msg_v01 *resp;
+	struct dfc_bind_client_req_msg_v01 *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, &txn,
+			   dfc_bind_client_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->ep_id_valid = 1;
+	req->ep_id.ep_type = svc->ep_type;
+	req->ep_id.iface_id = svc->iface_id;
+	ret = qmi_send_request(dfc_handle, ssctl, &txn,
+			       QMI_DFC_BIND_CLIENT_REQ_V01,
+			       QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
+			       dfc_bind_client_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int
+dfc_indication_register_req(struct qmi_handle *dfc_handle,
+			    struct sockaddr_qrtr *ssctl, u8 reg)
+{
+	struct dfc_indication_register_resp_msg_v01 *resp;
+	struct dfc_indication_register_req_msg_v01 *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, &txn,
+			   dfc_indication_register_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->report_flow_status_valid = 1;
+	req->report_flow_status = reg;
+
+	if (!dfc_ps_ext) {
+		req->report_tx_link_status_valid = 1;
+		req->report_tx_link_status = reg;
+	}
+
+	ret = qmi_send_request(dfc_handle, ssctl, &txn,
+			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
+			       QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
+			       dfc_indication_register_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int
+dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
+			struct sockaddr_qrtr *ssctl,
+			struct dfc_get_flow_status_resp_msg_v01 *resp)
+{
+	struct dfc_get_flow_status_req_msg_v01 *req;
+	struct qmi_txn *txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
+	if (!txn) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, txn,
+			   dfc_get_flow_status_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(dfc_handle, ssctl, txn,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01,
+			       QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
+			       dfc_get_flow_status_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(txn);
+	kfree(req);
+	return ret;
+}
+
+static int dfc_init_service(struct dfc_qmi_data *data)
+{
+	int rc;
+
+	rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
+	if (rc < 0)
+		return rc;
+
+	return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
+}
+
+static void
+dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	struct sk_buff *skb;
+	struct dfc_ack_cmd *msg;
+	int data_size = sizeof(struct dfc_ack_cmd);
+	int header_size = sizeof(struct dfc_qmap_header);
+
+	if (!qos)
+		return;
+
+	if (dfc_qmap) {
+		dfc_qmap_send_ack(qos, bearer_id, seq, type);
+		return;
+	}
+
+	skb = alloc_skb(data_size, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
+	memset(msg, 0, data_size);
+
+	msg->header.cd_bit = 1;
+	msg->header.mux_id = mux_id;
+	msg->header.pkt_len = htons(data_size - header_size);
+
+	msg->bearer_id = bearer_id;
+	msg->command_name = 4;
+	msg->cmd_type = 0;
+	msg->dfc_seq = htons(seq);
+	msg->type = type;
+	msg->ver = 2;
+	msg->transaction_id = htonl(qos->tran_num);
+
+	skb->dev = qos->real_dev;
+	skb->protocol = htons(ETH_P_MAP);
+
+	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
+	qos->tran_num++;
+
+	rmnet_map_tx_qmap_cmd(skb, RMNET_CH_DEFAULT, true);
+}
+
+int dfc_bearer_flow_ctl(struct net_device *dev,
+			struct rmnet_bearer_map *bearer,
+			struct qos_info *qos)
+{
+	bool enable;
+
+	enable = bearer->grant_size ? true : false;
+
+	/* Do not flow-disable the TCP ACK queue in TCP bidir mode;
+	 * the ACK queue is opened first so ACKs drain faster.
+	 * Since tcp ancillary is true most of the time this rarely
+	 * matters, but if there is a non-zero grant while tcp
+	 * ancillary is false, ACKs are sent out anyway.
+	 */
+	if (bearer->ack_mq_idx != INVALID_MQ)
+		qmi_rmnet_flow_control(dev, bearer->ack_mq_idx,
+				       enable || bearer->tcp_bidir);
+
+	qmi_rmnet_flow_control(dev, bearer->mq_idx, enable);
+
+	if (!enable && bearer->ack_req)
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
+	return 0;
+}
+
+static int dfc_all_bearer_flow_ctl(struct net_device *dev,
+				struct qos_info *qos, u8 ack_req, u32 ancillary,
+				struct dfc_flow_status_info_type_v01 *fc_info)
+{
+	struct rmnet_bearer_map *bearer;
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->grant_size = fc_info->num_bytes;
+		bearer->grant_thresh =
+			qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->seq = fc_info->seq_num;
+		bearer->ack_req = ack_req;
+		bearer->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		bearer->last_grant = fc_info->num_bytes;
+		bearer->last_seq = fc_info->seq_num;
+		bearer->last_adjusted_grant = fc_info->num_bytes;
+
+		dfc_bearer_flow_ctl(dev, bearer, qos);
+	}
+
+	return 0;
+}
+
+static u32 dfc_adjust_grant(struct rmnet_bearer_map *bearer,
+			    struct dfc_flow_status_info_type_v01 *fc_info)
+{
+	u32 grant;
+
+	if (!fc_info->rx_bytes_valid)
+		return fc_info->num_bytes;
+
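+	/* Credit back the bytes the modem reports as received */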
+	if (bearer->bytes_in_flight > fc_info->rx_bytes)
+		bearer->bytes_in_flight -= fc_info->rx_bytes;
+	else
+		bearer->bytes_in_flight = 0;
+
+	/* Adjusted grant = grant - bytes_in_flight */
+	if (fc_info->num_bytes > bearer->bytes_in_flight)
+		grant = fc_info->num_bytes - bearer->bytes_in_flight;
+	else
+		grant = 0;
+
+	trace_dfc_adjust_grant(fc_info->mux_id, fc_info->bearer_id,
+			       fc_info->num_bytes, fc_info->rx_bytes,
+			       bearer->bytes_in_flight, grant);
+	return grant;
+}
+
+static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
+			     u8 ack_req, u32 ancillary,
+			     struct dfc_flow_status_info_type_v01 *fc_info,
+			     bool is_query)
+{
+	struct rmnet_bearer_map *itm = NULL;
+	int rc = 0;
+	bool action = false;
+	u32 adjusted_grant;
+
+	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
+
+	/* cache the bearer assuming it is a new bearer */
+	if (unlikely(!itm && !is_query && fc_info->num_bytes))
+		itm = qmi_rmnet_get_bearer_noref(qos, fc_info->bearer_id);
+
+	if (itm) {
+		/* The RAT switch flag indicates the start and end of
+		 * the switch. Ignore indications in between.
+		 */
+		if (DFC_IS_RAT_SWITCH(ancillary))
+			itm->rat_switch = !fc_info->num_bytes;
+		else
+			if (itm->rat_switch)
+				return 0;
+
+		/* If TX is OFF but we received grant, ignore it */
+		if (itm->tx_off  && fc_info->num_bytes > 0)
+			return 0;
+
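+		/* Indication reports LL status: record that this bearer was
+		 * auto-switched to the low-latency channel and mark its MQ.
+		 */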
+		if (fc_info->ll_status &&
+		    itm->ch_switch.current_ch != RMNET_CH_LL) {
+			itm->ch_switch.current_ch = RMNET_CH_LL;
+			itm->ch_switch.auto_switched = true;
+			if (itm->mq_idx < MAX_MQ_NUM)
+				qos->mq[itm->mq_idx].is_ll_ch = RMNET_CH_LL;
+		}
+
+		/* Adjust grant for query */
+		if (dfc_qmap && is_query) {
+			adjusted_grant = dfc_adjust_grant(itm, fc_info);
+		} else {
+			adjusted_grant = fc_info->num_bytes;
+			itm->bytes_in_flight = 0;
+		}
+
+		/* update queue state only if there is a change in grant
+		 * or change in ancillary tcp state
+		 */
+		if ((itm->grant_size == 0 && adjusted_grant > 0) ||
+		    (itm->grant_size > 0 && adjusted_grant == 0) ||
+		    (itm->tcp_bidir ^ DFC_IS_TCP_BIDIR(ancillary)))
+			action = true;
+
+		/* QMAP needs an explicit disable ACK when a pending ack_req
+		 * is cleared while a grant is still outstanding.
+		 */
+		if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
+			dfc_qmap_send_ack(qos, itm->bearer_id,
+					  itm->seq, DFC_ACK_TYPE_DISABLE);
+
+		itm->grant_size = adjusted_grant;
+
+		/* No further query if the adjusted grant is less
+		 * than 20% of the original grant. Add to watch to
+		 * recover if no indication is received.
+		 */
+		if (dfc_qmap && is_query &&
+		    itm->grant_size < (fc_info->num_bytes / 5)) {
+			itm->grant_thresh = itm->grant_size;
+			qmi_rmnet_watchdog_add(itm);
+		} else {
+			itm->grant_thresh =
+				qmi_rmnet_grant_per(itm->grant_size);
+			qmi_rmnet_watchdog_remove(itm);
+		}
+
+		itm->seq = fc_info->seq_num;
+		itm->ack_req = ack_req;
+		itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+		itm->last_grant = fc_info->num_bytes;
+		itm->last_seq = fc_info->seq_num;
+		itm->last_adjusted_grant = adjusted_grant;
+
+		if (action)
+			rc = dfc_bearer_flow_ctl(dev, itm, qos);
+	}
+
+	return rc;
+}
+
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+			       struct dfc_flow_status_ind_msg_v01 *ind,
+			       bool is_query)
+{
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct dfc_flow_status_info_type_v01 *flow_status;
+	struct dfc_ancillary_info_type_v01 *ai;
+	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
+	u32 ancillary;
+	int i, j;
+
+	rcu_read_lock();
+
+	for (i = 0; i < ind->flow_status_len; i++) {
+		flow_status = &ind->flow_status[i];
+
+		ancillary = 0;
+		if (ind->ancillary_info_valid) {
+			for (j = 0; j < ind->ancillary_info_len; j++) {
+				ai = &ind->ancillary_info[j];
+				if (ai->mux_id == flow_status->mux_id &&
+				    ai->bearer_id == flow_status->bearer_id) {
+					ancillary = ai->reserved;
+					break;
+				}
+			}
+		}
+
+		trace_dfc_flow_ind(dfc->index,
+				   i, flow_status->mux_id,
+				   flow_status->bearer_id,
+				   flow_status->num_bytes,
+				   flow_status->seq_num,
+				   ack_req,
+				   ancillary);
+
+		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
+					  flow_status->mux_id);
+		if (!dev)
+			goto clean_out;
+
+		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+		if (!qos)
+			continue;
+
+		spin_lock_bh(&qos->qos_lock);
+
+		/* In powersave, change grant to 1 if it is an enable */
+		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
+			if (flow_status->num_bytes) {
+				flow_status->num_bytes = DEFAULT_GRANT;
+				flow_status->seq_num = 0;
+				/* below is to reset bytes-in-flight */
+				flow_status->rx_bytes_valid = 1;
+				flow_status->rx_bytes = 0xFFFFFFFF;
+			} else {
+				spin_unlock_bh(&qos->qos_lock);
+				continue;
+			}
+		}
+
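+		/* A bearer ID of 0xFF applies this grant to all bearers
+		 * on the mux.
+		 */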
+		if (unlikely(flow_status->bearer_id == 0xFF))
+			dfc_all_bearer_flow_ctl(
+				dev, qos, ack_req, ancillary, flow_status);
+		else
+			dfc_update_fc_map(
+				dev, qos, ack_req, ancillary, flow_status,
+				is_query);
+
+		spin_unlock_bh(&qos->qos_lock);
+	}
+
+clean_out:
+	rcu_read_unlock();
+}
+
+static void dfc_update_tx_link_status(struct net_device *dev,
+				      struct qos_info *qos, u8 tx_status,
+				      struct dfc_bearer_info_type_v01 *binfo)
+{
+	struct rmnet_bearer_map *itm = NULL;
+
+	itm = qmi_rmnet_get_bearer_map(qos, binfo->bearer_id);
+	if (!itm)
+		return;
+
+	/* If no change in tx status, ignore */
+	if (itm->tx_off == !tx_status)
+		return;
+
+	if (itm->grant_size && !tx_status) {
+		itm->grant_size = 0;
+		itm->tcp_bidir = false;
+		itm->bytes_in_flight = 0;
+		qmi_rmnet_watchdog_remove(itm);
+		dfc_bearer_flow_ctl(dev, itm, qos);
+	} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
+		itm->grant_size = DEFAULT_GRANT;
+		itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+		itm->seq = 0;
+		itm->ack_req = 0;
+		dfc_bearer_flow_ctl(dev, itm, qos);
+	}
+
+	itm->tx_off = !tx_status;
+}
+
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+				   struct dfc_tx_link_status_ind_msg_v01 *ind)
+{
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct dfc_bearer_info_type_v01 *bearer_info;
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < ind->bearer_info_len; i++) {
+		bearer_info = &ind->bearer_info[i];
+
+		trace_dfc_tx_link_status_ind(dfc->index, i,
+					     ind->tx_status,
+					     bearer_info->mux_id,
+					     bearer_info->bearer_id);
+
+		dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
+					  bearer_info->mux_id);
+		if (!dev)
+			goto clean_out;
+
+		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+		if (!qos)
+			continue;
+
+		spin_lock_bh(&qos->qos_lock);
+
+		dfc_update_tx_link_status(
+			dev, qos, ind->tx_status, bearer_info);
+
+		spin_unlock_bh(&qos->qos_lock);
+	}
+
+clean_out:
+	rcu_read_unlock();
+}
+
+static void dfc_qmi_ind_work(struct work_struct *work)
+{
+	struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
+						qmi_ind_work);
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (!dfc)
+		return;
+
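+	/* Drain all queued indications before marking DL message activity */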
+	local_bh_disable();
+
+	do {
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
+						   struct dfc_svc_ind, list);
+		if (svc_ind)
+			list_del(&svc_ind->list);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		if (!svc_ind)
+			break;
+
+		if (!dfc->restart_state) {
+			if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
+				dfc_do_burst_flow_control(
+						dfc, &svc_ind->d.dfc_info,
+						false);
+			else if (svc_ind->msg_id ==
+					QMI_DFC_TX_LINK_STATUS_IND_V01)
+				dfc_handle_tx_link_status_ind(
+						dfc, &svc_ind->d.tx_status);
+		}
+		kfree(svc_ind);
+	} while (1);
+
+	local_bh_enable();
+
+	qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
+}
+
+static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			    struct qmi_txn *txn, const void *data)
+{
+	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
+						handle);
+	struct dfc_flow_status_ind_msg_v01 *ind_msg;
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (qmi != &dfc->handle)
+		return;
+
+	ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
+	if (ind_msg->flow_status_valid) {
+		if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
+			pr_err("%s() Invalid fc info len: %d\n",
+			       __func__, ind_msg->flow_status_len);
+			return;
+		}
+
+		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
+		if (!svc_ind)
+			return;
+
+		svc_ind->msg_id = QMI_DFC_FLOW_STATUS_IND_V01;
+		memcpy(&svc_ind->d.dfc_info, ind_msg, sizeof(*ind_msg));
+
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
+	}
+}
+
+static void dfc_tx_link_status_ind_cb(struct qmi_handle *qmi,
+				      struct sockaddr_qrtr *sq,
+				      struct qmi_txn *txn, const void *data)
+{
+	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
+						handle);
+	struct dfc_tx_link_status_ind_msg_v01 *ind_msg;
+	struct dfc_svc_ind *svc_ind;
+	unsigned long flags;
+
+	if (qmi != &dfc->handle)
+		return;
+
+	ind_msg = (struct dfc_tx_link_status_ind_msg_v01 *)data;
+	if (ind_msg->bearer_info_valid) {
+		if (ind_msg->bearer_info_len > DFC_MAX_BEARERS_V01) {
+			pr_err("%s() Invalid bearer info len: %d\n",
+			       __func__, ind_msg->bearer_info_len);
+			return;
+		}
+
+		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
+		if (!svc_ind)
+			return;
+
+		svc_ind->msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01;
+		memcpy(&svc_ind->d.tx_status, ind_msg, sizeof(*ind_msg));
+
+		spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+		list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+		spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+		queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
+	}
+}
+
+static void dfc_svc_init(struct work_struct *work)
+{
+	int rc = 0;
+	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
+						 svc_arrive);
+	struct qmi_info *qmi;
+
+	if (data->restart_state == 1)
+		return;
+
+	rc = dfc_init_service(data);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
+	}
+
+	if (data->restart_state == 1)
+		return;
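+	/* Acquire RTNL without blocking so the init can bail out if the
+	 * client is being torn down (restart_state) in the meantime.
+	 */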
+	while (!rtnl_trylock()) {
+		if (!data->restart_state)
+			cond_resched();
+		else
+			return;
+	}
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
+
+	qmi->dfc_pending[data->index] = NULL;
+	qmi->dfc_clients[data->index] = (void *)data;
+	trace_dfc_client_state_up(data->index,
+				  data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
+
+	rtnl_unlock();
+
+	pr_info("Connection established with the DFC Service\n");
+}
+
+static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
+						 handle);
+
+	data->ssctl.sq_family = AF_QIPCRTR;
+	data->ssctl.sq_node = svc->node;
+	data->ssctl.sq_port = svc->port;
+
+	queue_work(data->dfc_wq, &data->svc_arrive);
+
+	return 0;
+}
+
+static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
+						 handle);
+
+	if (!data)
+		pr_debug("%s() data is null\n", __func__);
+}
+
+static struct qmi_ops server_ops = {
+	.new_server = dfc_svc_arrive,
+	.del_server = dfc_svc_exit,
+};
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
+		.ei = dfc_flow_status_ind_v01_ei,
+		.decoded_size = sizeof(struct dfc_flow_status_ind_msg_v01),
+		.fn = dfc_clnt_ind_cb,
+	},
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_DFC_TX_LINK_STATUS_IND_V01,
+		.ei = dfc_tx_link_status_ind_v01_ei,
+		.decoded_size = sizeof(struct dfc_tx_link_status_ind_msg_v01),
+		.fn = dfc_tx_link_status_ind_cb,
+	},
+	{},
+};
+
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *data;
+	int rc = -ENOMEM;
+
+	if (!port || !qmi)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->rmnet_port = port;
+	data->index = index;
+	data->restart_state = 0;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+
+	INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
+	INIT_LIST_HEAD(&data->qmi_ind_q);
+	spin_lock_init(&data->qmi_ind_lock);
+
+	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
+	if (!data->dfc_wq) {
+		pr_err("%s Could not create workqueue\n", __func__);
+		goto err0;
+	}
+
+	INIT_WORK(&data->svc_arrive, dfc_svc_init);
+	rc = qmi_handle_init(&data->handle,
+			     QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
+			     &server_ops, qmi_indication_handler);
+	if (rc < 0) {
+		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
+		goto err1;
+	}
+
+	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
+			    DFC_SERVICE_VERS_V01,
+			    psvc->instance);
+	if (rc < 0) {
+		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
+		goto err2;
+	}
+
+	qmi->dfc_pending[index] = (void *)data;
+
+	return 0;
+
+err2:
+	qmi_handle_release(&data->handle);
+err1:
+	destroy_workqueue(data->dfc_wq);
+err0:
+	kfree(data);
+	return rc;
+}
+
+void dfc_qmi_client_exit(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+	if (!data) {
+		pr_err("%s() data is null\n", __func__);
+		return;
+	}
+
+	data->restart_state = 1;
+	trace_dfc_client_state_down(data->index, 0);
+	qmi_handle_release(&data->handle);
+
+	drain_workqueue(data->dfc_wq);
+	destroy_workqueue(data->dfc_wq);
+	kfree(data);
+}
+
+void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
+			 int ip_type, u32 mark, unsigned int len)
+{
+	struct rmnet_bearer_map *bearer = NULL;
+	struct rmnet_flow_map *itm;
+	u32 start_grant;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	/* Mark is flow_id */
+	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
+	if (likely(itm))
+		bearer = itm->bearer;
+
+	if (unlikely(!bearer))
+		goto out;
+
+	trace_dfc_flow_check(dev->name, bearer->bearer_id,
+			     len, mark, bearer->grant_size);
+
+	bearer->bytes_in_flight += len;
+
+	if (!bearer->grant_size)
+		goto out;
+
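+	/* Deduct this packet from the remaining grant */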
+	start_grant = bearer->grant_size;
+	if (len >= bearer->grant_size)
+		bearer->grant_size = 0;
+	else
+		bearer->grant_size -= len;
+
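+	/* Crossed the grant threshold: notify the modem with a
+	 * threshold ACK.
+	 */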
+	if (start_grant > bearer->grant_thresh &&
+	    bearer->grant_size <= bearer->grant_thresh) {
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_THRESHOLD);
+	}
+
+	if (!bearer->grant_size)
+		dfc_bearer_flow_ctl(dev, bearer, qos);
+
+out:
+	spin_unlock_bh(&qos->qos_lock);
+}
+
+void dfc_qmi_query_flow(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+	struct dfc_get_flow_status_resp_msg_v01 *resp;
+	struct dfc_svc_ind *svc_ind;
+	int rc;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp)
+		return;
+
+	svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
+	if (!svc_ind) {
+		kfree(resp);
+		return;
+	}
+
+	if (!data)
+		goto done;
+
+	rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
+
+	if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
+	    resp->flow_status_len > DFC_MAX_BEARERS_V01)
+		goto done;
+
+	svc_ind->d.dfc_info.flow_status_valid = resp->flow_status_valid;
+	svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
+	memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
+		sizeof(resp->flow_status[0]) * resp->flow_status_len);
+	dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info, true);
+
+done:
+	kfree(svc_ind);
+	kfree(resp);
+}

+ 1512 - 0
qcom/opensource/datarmnet/core/qmi_rmnet.c

@@ -0,0 +1,1512 @@
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/soc/qcom/qmi.h>
+
+#include "qmi_rmnet_i.h"
+#include "qmi_rmnet.h"
+#include "rmnet_qmi.h"
+#include "rmnet_module.h"
+#include "rmnet_hook.h"
+#include "dfc.h"
+#include <linux/rtnetlink.h>
+#include <uapi/linux/rtnetlink.h>
+#include <net/pkt_sched.h>
+#include <net/tcp.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#define NLMSG_FLOW_ACTIVATE 1
+#define NLMSG_FLOW_DEACTIVATE 2
+#define NLMSG_CLIENT_SETUP 4
+#define NLMSG_CLIENT_DELETE 5
+#define NLMSG_SCALE_FACTOR 6
+#define NLMSG_WQ_FREQUENCY 7
+#define NLMSG_CHANNEL_SWITCH 8
+
+#define FLAG_DFC_MASK 0x000F
+#define FLAG_POWERSAVE_MASK 0x0010
+#define FLAG_QMAP_MASK 0x0020
+#define FLAG_PS_EXT_MASK 0x0040
+
+#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
+
+#define DFC_SUPPORTED_MODE(m) \
+	((m) == DFC_MODE_SA)
+
+#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
+#define FLAG_TO_PS_EXT(f) ((f) & FLAG_PS_EXT_MASK)
+
+int dfc_mode;
+int dfc_qmap;
+int dfc_ps_ext;
+
+unsigned int rmnet_wq_frequency __read_mostly = 1000;
+
+#define PS_WORK_ACTIVE_BIT 0
+
+#define NO_DELAY (0x0000 * HZ)
+#define PS_INTERVAL_MS (rmnet_wq_frequency)
+#define PS_INTERVAL_KT (ms_to_ktime(PS_INTERVAL_MS))
+#define PS_INTERVAL_JF (msecs_to_jiffies(PS_INTERVAL_MS))
+
+#define WATCHDOG_EXPIRE_JF (msecs_to_jiffies(50))
+
+#ifdef CONFIG_QTI_QMI_DFC
+static unsigned int qmi_rmnet_scale_factor = 5;
+static LIST_HEAD(qos_cleanup_list);
+#endif
+
+static int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi);
+
+struct qmi_elem_info data_ep_id_type_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum data_ep_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct data_ep_id_type_v01,
+					   ep_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct data_ep_id_type_v01,
+					   iface_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	},
+};
+EXPORT_SYMBOL(data_ep_id_type_v01_ei);
+
+void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
+{
+	int i;
+
+	if (!qmi)
+		return NULL;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_clients[i])
+			return qmi->dfc_clients[i];
+	}
+
+	return NULL;
+}
+
+static inline int
+qmi_rmnet_has_client(struct qmi_info *qmi)
+{
+	if (qmi->wda_client)
+		return 1;
+
+	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
+}
+
+static int
+qmi_rmnet_has_pending(struct qmi_info *qmi)
+{
+	int i;
+
+	if (qmi->wda_pending)
+		return 1;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_pending[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_QTI_QMI_DFC
+static void
+qmi_rmnet_clean_flow_list(struct qos_info *qos)
+{
+	struct rmnet_bearer_map *bearer, *br_tmp;
+	struct rmnet_flow_map *itm, *fl_tmp;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
+		list_del(&itm->list);
+		kfree(itm);
+	}
+
+	list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) {
+		list_del(&bearer->list);
+		kfree(bearer);
+	}
+
+	memset(qos->mq, 0, sizeof(qos->mq));
+}
+
+struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type)
+{
+	struct rmnet_flow_map *itm;
+
+	if (!qos)
+		return NULL;
+
+	list_for_each_entry(itm, &qos->flow_head, list) {
+		if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type))
+			return itm;
+	}
+	return NULL;
+}
+
+struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id)
+{
+	struct rmnet_bearer_map *itm;
+
+	if (!qos)
+		return NULL;
+
+	list_for_each_entry(itm, &qos->bearer_head, list) {
+		if (itm->bearer_id == bearer_id)
+			return itm;
+	}
+	return NULL;
+}
+
+static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
+				      struct rmnet_flow_map *new_map)
+{
+	itm->bearer_id = new_map->bearer_id;
+	itm->flow_id = new_map->flow_id;
+	itm->ip_type = new_map->ip_type;
+	itm->mq_idx = new_map->mq_idx;
+}
+
+int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
+{
+	struct netdev_queue *q;
+
+	if (unlikely(mq_idx >= dev->num_tx_queues))
+		return 0;
+
+	q = netdev_get_tx_queue(dev, mq_idx);
+	if (unlikely(!q))
+		return 0;
+
+	if (enable)
+		netif_tx_wake_queue(q);
+	else
+		netif_tx_stop_queue(q);
+
+	trace_dfc_qmi_tc(dev->name, mq_idx, enable);
+
+	return 0;
+}
+
+/**
+ * qmi_rmnet_watchdog_fn - watchdog timer function
+ */
+static void qmi_rmnet_watchdog_fn(struct timer_list *t)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = container_of(t, struct rmnet_bearer_map, watchdog);
+
+	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 2);
+
+	spin_lock_bh(&bearer->qos->qos_lock);
+
+	if (bearer->watchdog_quit)
+		goto done;
+
+	/*
+	 * Possible stall, try to recover. Enable 80% query and jumpstart
+	 * the bearer if disabled.
+	 */
+	bearer->watchdog_expire_cnt++;
+	bearer->bytes_in_flight = 0;
+	if (!bearer->grant_size) {
+		bearer->grant_size = DEFAULT_CALL_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		dfc_bearer_flow_ctl(bearer->qos->vnd_dev, bearer, bearer->qos);
+	} else {
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+	}
+
+done:
+	bearer->watchdog_started = false;
+	spin_unlock_bh(&bearer->qos->qos_lock);
+}
+
+/**
+ * qmi_rmnet_watchdog_add - add the bearer to watch
+ * Needs to be called with qos_lock
+ */
+void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer)
+{
+	bearer->watchdog_quit = false;
+
+	if (bearer->watchdog_started)
+		return;
+
+	bearer->watchdog_started = true;
+	mod_timer(&bearer->watchdog, jiffies + WATCHDOG_EXPIRE_JF);
+
+	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 1);
+}
+
+/**
+ * qmi_rmnet_watchdog_remove - remove the bearer from watch
+ * Needs to be called with qos_lock
+ */
+void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
+{
+	bearer->watchdog_quit = true;
+
+	if (!bearer->watchdog_started)
+		return;
+
+	del_timer(&bearer->watchdog);
+	bearer->watchdog_started = false;
+
+	trace_dfc_watchdog(bearer->qos->mux_id, bearer->bearer_id, 0);
+}
+
+/**
+ * qmi_rmnet_bearer_clean - clean the removed bearer
+ * Must be called with rtnl_lock held, but not qos_lock
+ */
+static void qmi_rmnet_bearer_clean(struct qos_info *qos)
+{
+	if (qos->removed_bearer) {
+		qos->removed_bearer->watchdog_quit = true;
+		del_timer_sync(&qos->removed_bearer->watchdog);
+		qos->removed_bearer->ch_switch.timer_quit = true;
+		del_timer_sync(&qos->removed_bearer->ch_switch.guard_timer);
+		kfree(qos->removed_bearer);
+		qos->removed_bearer = NULL;
+	}
+}
+
+static struct rmnet_bearer_map *__qmi_rmnet_bearer_get(
+				struct qos_info *qos_info, u8 bearer_id)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = qmi_rmnet_get_bearer_map(qos_info, bearer_id);
+	if (bearer) {
+		bearer->flow_ref++;
+	} else {
+		bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+		if (!bearer)
+			return NULL;
+
+		bearer->bearer_id = bearer_id;
+		bearer->flow_ref = 1;
+		bearer->grant_size = DEFAULT_CALL_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(bearer->grant_size);
+		bearer->mq_idx = INVALID_MQ;
+		bearer->ack_mq_idx = INVALID_MQ;
+		bearer->qos = qos_info;
+		timer_setup(&bearer->watchdog, qmi_rmnet_watchdog_fn, 0);
+		timer_setup(&bearer->ch_switch.guard_timer,
+			    rmnet_ll_guard_fn, 0);
+		list_add(&bearer->list, &qos_info->bearer_head);
+	}
+
+	return bearer;
+}
+
+static void __qmi_rmnet_bearer_put(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_bearer_map *bearer,
+				   bool reset)
+{
+	struct mq_map *mq;
+	int i, j;
+
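+	/* Last flow reference dropped: detach the bearer from its MQs,
+	 * re-enable those queues so pending packets can drain, and stage
+	 * the bearer for removal.
+	 */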
+	if (bearer && --bearer->flow_ref == 0) {
+		for (i = 0; i < MAX_MQ_NUM; i++) {
+			mq = &qos_info->mq[i];
+			if (mq->bearer != bearer)
+				continue;
+
+			mq->bearer = NULL;
+			mq->is_ll_ch = false;
+			mq->drop_on_remove = reset;
+			smp_mb();
+
+			qmi_rmnet_flow_control(dev, i, 1);
+			if (dfc_mode == DFC_MODE_SA) {
+				j = i + ACK_MQ_OFFSET;
+				qmi_rmnet_flow_control(dev, j, 1);
+			}
+		}
+
+		/* Remove from bearer map */
+		list_del(&bearer->list);
+		qos_info->removed_bearer = bearer;
+	}
+}
+
+static void __qmi_rmnet_update_mq(struct net_device *dev,
+				  struct qos_info *qos_info,
+				  struct rmnet_bearer_map *bearer,
+				  struct rmnet_flow_map *itm)
+{
+	struct mq_map *mq;
+
+	/* In SA mode default mq is not associated with any bearer */
+	if (dfc_mode == DFC_MODE_SA && itm->mq_idx == DEFAULT_MQ_NUM)
+		return;
+
+	mq = &qos_info->mq[itm->mq_idx];
+	if (!mq->bearer) {
+		mq->bearer = bearer;
+		mq->is_ll_ch = bearer->ch_switch.current_ch;
+		mq->drop_on_remove = false;
+		smp_mb();
+
+		if (dfc_mode == DFC_MODE_SA) {
+			bearer->mq_idx = itm->mq_idx;
+			bearer->ack_mq_idx = itm->mq_idx + ACK_MQ_OFFSET;
+		} else {
+			bearer->mq_idx = itm->mq_idx;
+		}
+
+		/* Always enable flow for the newly associated bearer */
+		if (!bearer->grant_size) {
+			bearer->grant_size = DEFAULT_GRANT;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(DEFAULT_GRANT);
+		}
+		qmi_rmnet_flow_control(dev, itm->mq_idx, 1);
+		if (dfc_mode == DFC_MODE_SA)
+			qmi_rmnet_flow_control(dev, bearer->ack_mq_idx, 1);
+	}
+}
+
+static int __qmi_rmnet_rebind_flow(struct net_device *dev,
+				   struct qos_info *qos_info,
+				   struct rmnet_flow_map *itm,
+				   struct rmnet_flow_map *new_map)
+{
+	struct rmnet_bearer_map *bearer;
+
+	__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, false);
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map->bearer_id);
+	if (!bearer)
+		return -ENOMEM;
+
+	qmi_rmnet_update_flow_map(itm, new_map);
+	itm->bearer = bearer;
+
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
+
+	return 0;
+}
+
+static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
+			      struct qmi_info *qmi)
+{
+	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
+	struct rmnet_flow_map new_map, *itm;
+	struct rmnet_bearer_map *bearer;
+	struct tcmsg tmp_tcm;
+	int rc = 0;
+
+	if (!qos_info || !tcm || tcm->tcm_handle >= MAX_MQ_NUM)
+		return -EINVAL;
+
+	ASSERT_RTNL();
+
+	/* flow activate
+	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
+	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - mq_idx
+	 */
+
+	new_map.bearer_id = tcm->tcm__pad1;
+	new_map.flow_id = tcm->tcm_parent;
+	new_map.ip_type = tcm->tcm_ifindex;
+	new_map.mq_idx = tcm->tcm_handle;
+	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
+			    new_map.ip_type, new_map.mq_idx, 1);
+
+again:
+	spin_lock_bh(&qos_info->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
+				     new_map.ip_type);
+	if (itm) {
+		if (itm->bearer_id != new_map.bearer_id) {
+			rc = __qmi_rmnet_rebind_flow(
+				dev, qos_info, itm, &new_map);
+			goto done;
+		} else if (itm->mq_idx != new_map.mq_idx) {
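+			/* Same bearer but a different MQ: delete the stale
+			 * mapping and retry so the flow is re-added cleanly.
+			 */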
+			tmp_tcm.tcm__pad1 = itm->bearer_id;
+			tmp_tcm.tcm_parent = itm->flow_id;
+			tmp_tcm.tcm_ifindex = itm->ip_type;
+			tmp_tcm.tcm_handle = itm->mq_idx;
+			spin_unlock_bh(&qos_info->qos_lock);
+			qmi_rmnet_del_flow(dev, &tmp_tcm, qmi);
+			goto again;
+		} else {
+			goto done;
+		}
+	}
+
+	/* Create flow map */
+	itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
+	if (!itm) {
+		spin_unlock_bh(&qos_info->qos_lock);
+		return -ENOMEM;
+	}
+
+	qmi_rmnet_update_flow_map(itm, &new_map);
+	list_add(&itm->list, &qos_info->flow_head);
+
+	/* Create or update bearer map */
+	bearer = __qmi_rmnet_bearer_get(qos_info, new_map.bearer_id);
+	if (!bearer) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	itm->bearer = bearer;
+
+	__qmi_rmnet_update_mq(dev, qos_info, bearer, itm);
+
+done:
+	spin_unlock_bh(&qos_info->qos_lock);
+
+	qmi_rmnet_bearer_clean(qos_info);
+
+	return rc;
+}
+
+static int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
+	struct rmnet_flow_map new_map, *itm;
+
+	if (!qos_info)
+		return -EINVAL;
+
+	ASSERT_RTNL();
+
+	/* flow deactivate
+	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
+	 * tcm->tcm_ifindex - ip_type
+	 */
+
+	spin_lock_bh(&qos_info->qos_lock);
+
+	new_map.bearer_id = tcm->tcm__pad1;
+	new_map.flow_id = tcm->tcm_parent;
+	new_map.ip_type = tcm->tcm_ifindex;
+	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
+				     new_map.ip_type);
+	if (itm) {
+		trace_dfc_flow_info(dev->name, new_map.bearer_id,
+				    new_map.flow_id, new_map.ip_type,
+				    itm->mq_idx, 0);
+
+		__qmi_rmnet_bearer_put(dev, qos_info, itm->bearer, true);
+
+		/* Remove from flow map */
+		list_del(&itm->list);
+		kfree(itm);
+	}
+
+	if (list_empty(&qos_info->flow_head))
+		netif_tx_wake_all_queues(dev);
+
+	spin_unlock_bh(&qos_info->qos_lock);
+
+	qmi_rmnet_bearer_clean(qos_info);
+
+	return 0;
+}
+
+static void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+	int i;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->dfc_clients[i] && !dfc_qmap &&
+		    !qmi->dfc_client_exiting[i])
+			dfc_qmi_query_flow(qmi->dfc_clients[i]);
+	}
+}
+
+struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
+						    u8 bearer_id)
+{
+	struct rmnet_bearer_map *bearer;
+
+	bearer = __qmi_rmnet_bearer_get(qos_info, bearer_id);
+	if (bearer)
+		bearer->flow_ref--;
+
+	return bearer;
+}
+
+#else
+static inline void
+qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
+			  struct rmnet_flow_map *new_map)
+{
+}
+
+static inline int
+qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+}
+#endif
+
+static int
+qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
+{
+	int idx, err = 0;
+	struct svc_info svc;
+
+	ASSERT_RTNL();
+
+	/* client setup
+	 * tcm->tcm_handle - instance, tcm->tcm_info - ep_type,
+	 * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags
+	 */
+	idx = (tcm->tcm_handle == 0) ? 0 : 1;
+
+	if (!qmi) {
+		qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
+		if (!qmi)
+			return -ENOMEM;
+
+		rmnet_init_qmi_pt(port, qmi);
+	}
+
+	qmi->flag = tcm->tcm_ifindex;
+	qmi->ps_enabled = true;
+	qmi->ps_ext = FLAG_TO_PS_EXT(qmi->flag);
+	svc.instance = tcm->tcm_handle;
+	svc.ep_type = tcm->tcm_info;
+	svc.iface_id = tcm->tcm_parent;
+
+	if (DFC_SUPPORTED_MODE(dfc_mode) &&
+	    !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
+		if (dfc_qmap)
+			err = dfc_qmap_client_init(port, idx, &svc, qmi);
+		else
+			err = dfc_qmi_client_init(port, idx, &svc, qmi);
+		qmi->dfc_client_exiting[idx] = false;
+	}
+
+	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
+	    (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
+		err = wda_qmi_client_init(port, &svc, qmi);
+	}
+
+	return err;
+}
+
+static int
+__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
+{
+	void *data = NULL;
+
+	ASSERT_RTNL();
+
+	if (qmi->dfc_clients[idx])
+		data = qmi->dfc_clients[idx];
+	else if (qmi->dfc_pending[idx])
+		data = qmi->dfc_pending[idx];
+
+	if (data) {
+		if (dfc_qmap)
+			dfc_qmap_client_exit(data);
+		else
+			dfc_qmi_client_exit(data);
+		qmi->dfc_clients[idx] = NULL;
+		qmi->dfc_pending[idx] = NULL;
+	}
+
+	if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
+		rmnet_reset_qmi_pt(port);
+		kfree(qmi);
+		return 0;
+	}
+
+	return 1;
+}
+
+static void
+qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
+{
+	int idx;
+	void *data = NULL;
+
+	/* client delete: tcm->tcm_handle - instance */
+	idx = (tcm->tcm_handle == 0) ? 0 : 1;
+
+	ASSERT_RTNL();
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
+
+	if ((idx == 0) && data) {
+		wda_qmi_client_exit(data);
+		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
+	} else {
+		qmi->dfc_client_exiting[idx] = true;
+		qmi_rmnet_flush_ps_wq();
+	}
+
+	__qmi_rmnet_delete_client(port, qmi, idx);
+}
+
+int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+			  int attr_len)
+{
+	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
+	void *wda_data = NULL;
+	int rc = 0;
+
+	switch (tcm->tcm_family) {
+	case NLMSG_FLOW_ACTIVATE:
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
+		    !qmi_rmnet_has_dfc_client(qmi))
+			return rc;
+
+		qmi_rmnet_add_flow(dev, tcm, qmi);
+		break;
+	case NLMSG_FLOW_DEACTIVATE:
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
+			return rc;
+
+		qmi_rmnet_del_flow(dev, tcm, qmi);
+		break;
+	case NLMSG_CLIENT_SETUP:
+		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
+		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
+		dfc_ps_ext = FLAG_TO_PS_EXT(tcm->tcm_ifindex);
+
+		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
+		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
+			return rc;
+
+		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
+			/* retrieve qmi again as it could have been changed */
+			qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+			if (qmi &&
+			    !qmi_rmnet_has_client(qmi) &&
+			    !qmi_rmnet_has_pending(qmi)) {
+				rmnet_reset_qmi_pt(port);
+				kfree(qmi);
+			}
+
+			return rc;
+		}
+
+		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+			qmi_rmnet_work_init(port);
+			rmnet_set_powersave_format(port);
+		}
+		rmnet_ll_wq_init();
+		break;
+	case NLMSG_CLIENT_DELETE:
+		if (!qmi)
+			return rc;
+		if (tcm->tcm_handle == 0) { /* instance 0 */
+			rmnet_clear_powersave_format(port);
+			if (qmi->wda_client)
+				wda_data = qmi->wda_client;
+			else if (qmi->wda_pending)
+				wda_data = qmi->wda_pending;
+			wda_qmi_client_release(wda_data);
+			qmi_rmnet_work_exit(port);
+		}
+		qmi_rmnet_delete_client(port, qmi, tcm);
+		rmnet_ll_wq_exit();
+		break;
+	case NLMSG_SCALE_FACTOR:
+		if (!tcm->tcm_ifindex)
+			return rc;
+		qmi_rmnet_scale_factor = tcm->tcm_ifindex;
+		break;
+	case NLMSG_WQ_FREQUENCY:
+		if (tcm->tcm_ifindex >= 100)
+			rmnet_wq_frequency = tcm->tcm_ifindex;
+		else
+			rc = -EINVAL;
+		break;
+	case NLMSG_CHANNEL_SWITCH:
+		if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
+		    !qmi_rmnet_has_dfc_client(qmi))
+			return rc;
+
+		rc = rmnet_ll_switch(dev, tcm, attr_len);
+		break;
+	default:
+		pr_debug("%s(): No handler\n", __func__);
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qmi_rmnet_change_link);
+
+void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
+{
+	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
+	int i;
+	void *data = NULL;
+
+	if (!qmi)
+		return;
+
+	ASSERT_RTNL();
+
+	if (qmi->wda_client)
+		data = qmi->wda_client;
+	else if (qmi->wda_pending)
+		data = qmi->wda_pending;
+
+	wda_qmi_client_release(data);
+	qmi_rmnet_work_exit(port);
+	rmnet_ll_wq_exit();
+
+	if (data) {
+		wda_qmi_client_exit(data);
+		qmi->wda_client = NULL;
+		qmi->wda_pending = NULL;
+	}
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (!__qmi_rmnet_delete_client(port, qmi, i))
+			return;
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_qmi_exit);
+
+void qmi_rmnet_enable_all_flows(struct net_device *dev)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	bool do_wake;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->seq = 0;
+		bearer->ack_req = 0;
+		bearer->bytes_in_flight = 0;
+		bearer->tcp_bidir = false;
+		bearer->rat_switch = false;
+
+		qmi_rmnet_watchdog_remove(bearer);
+
+		if (bearer->tx_off)
+			continue;
+
+		do_wake = !bearer->grant_size;
+		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+
+		if (do_wake)
+			dfc_bearer_flow_ctl(dev, bearer, qos);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+}
+EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
+
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	bool ret = true;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return true;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		if (!bearer->grant_size) {
+			ret = false;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
+
+/**
+ * qmi_rmnet_prepare_ps_bearers - get disabled bearers and
+ * reset enabled bearers
+ */
+void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
+				  u8 *bearer_id)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	u8 current_num_bearers = 0;
+	u8 num_bearers_left = 0;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos || !num_bearers)
+		return;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	num_bearers_left = *num_bearers;
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		if (bearer->grant_size) {
+			bearer->seq = 0;
+			bearer->ack_req = 0;
+			bearer->bytes_in_flight = 0;
+			bearer->tcp_bidir = false;
+			bearer->rat_switch = false;
+			qmi_rmnet_watchdog_remove(bearer);
+			bearer->grant_size = DEFAULT_GRANT;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(DEFAULT_GRANT);
+		} else if (num_bearers_left) {
+			if (bearer_id)
+				bearer_id[current_num_bearers] =
+					bearer->bearer_id;
+			current_num_bearers++;
+			num_bearers_left--;
+		} else {
+			pr_err("DFC: no bearer space\n");
+		}
+	}
+
+	*num_bearers = current_num_bearers;
+
+	spin_unlock_bh(&qos->qos_lock);
+}
+EXPORT_SYMBOL(qmi_rmnet_prepare_ps_bearers);
+
+#ifdef CONFIG_QTI_QMI_DFC
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+			      bool *drop, bool *is_low_latency)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	int txq = skb->queue_mapping;
+
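+	/* ACK queues share state with their data MQ; map back to the
+	 * data MQ index.
+	 */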
+	if (txq > ACK_MQ_OFFSET)
+		txq -= ACK_MQ_OFFSET;
+
+	if (unlikely(!qos || txq >= MAX_MQ_NUM))
+		return false;
+
+	/* If the bearer is gone, packets may need to be dropped */
+	*drop = (txq != DEFAULT_MQ_NUM && !READ_ONCE(qos->mq[txq].bearer) &&
+		 READ_ONCE(qos->mq[txq].drop_on_remove));
+
+	*is_low_latency = READ_ONCE(qos->mq[txq].is_ll_ch);
+
+	return true;
+}
+EXPORT_SYMBOL(qmi_rmnet_get_flow_state);
+
+void qmi_rmnet_burst_fc_check(struct net_device *dev,
+			      int ip_type, u32 mark, unsigned int len)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+
+	if (!qos)
+		return;
+
+	dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
+}
+EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
+
+static bool _qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
+{
+	struct tcphdr *th;
+	int ip_hdr_len;
+	int ip_payload_len;
+
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    ip_hdr(skb)->protocol == IPPROTO_TCP) {
+		ip_hdr_len = ip_hdr(skb)->ihl << 2;
+		ip_payload_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdr_len;
+	} else if (skb->protocol == htons(ETH_P_IPV6) &&
+		   ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) {
+		ip_hdr_len = sizeof(struct ipv6hdr);
+		ip_payload_len = ntohs(ipv6_hdr(skb)->payload_len);
+	} else {
+		return false;
+	}
+
+	th = (struct tcphdr *)(skb->data + ip_hdr_len);
+	/* A segment with no TCP payload is treated as an ACK;
+	 * the ACK flag itself is no longer checked.
+	 */
+	if (ip_payload_len == th->doff << 2)
+		return true;
+
+	return false;
+}
+
+static inline bool qmi_rmnet_is_tcp_ack(struct sk_buff *skb)
+{
+	/* Locally generated TCP acks */
+	if (skb_is_tcp_pure_ack(skb))
+		return true;
+
+	/* Forwarded */
+	if (unlikely(_qmi_rmnet_is_tcp_ack(skb)))
+		return true;
+
+	return false;
+}
+
+static int qmi_rmnet_get_queue_sa(struct qos_info *qos, struct sk_buff *skb)
+{
+	struct rmnet_flow_map *itm;
+	int ip_type;
+	int txq = DEFAULT_MQ_NUM;
+
+	/* Put NDP in default mq */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6 &&
+	    icmp6_hdr(skb)->icmp6_type >= 133 &&
+	    icmp6_hdr(skb)->icmp6_type <= 137) {
+		return DEFAULT_MQ_NUM;
+	}
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, skb->mark, ip_type);
+	if (unlikely(!itm))
+		goto done;
+
+	/* TCP ACKs go to the ACK MQ; everything else to the assigned MQ */
+	if (likely(itm->bearer) && qmi_rmnet_is_tcp_ack(skb))
+		txq = itm->bearer->ack_mq_idx;
+	else
+		txq = itm->mq_idx;
+
+done:
+	spin_unlock_bh(&qos->qos_lock);
+	return txq;
+}
+
+int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	int txq = 0, ip_type = AF_INET;
+	struct rmnet_flow_map *itm;
+	u32 mark = skb->mark;
+
+	if (!qos)
+		return 0;
+
+	if (likely(dfc_mode == DFC_MODE_SA))
+		return qmi_rmnet_get_queue_sa(qos, skb);
+
+	ip_type = (skb->protocol == htons(ETH_P_IPV6)) ? AF_INET6 : AF_INET;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
+	if (itm)
+		txq = itm->mq_idx;
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return txq;
+}
+EXPORT_SYMBOL(qmi_rmnet_get_queue);
+
+inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
+{
+	return grant / qmi_rmnet_scale_factor;
+}
+EXPORT_SYMBOL(qmi_rmnet_grant_per);
+
+void *qmi_rmnet_qos_init(struct net_device *real_dev,
+			 struct net_device *vnd_dev, u8 mux_id)
+{
+	struct qos_info *qos;
+
+	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+	if (!qos)
+		return NULL;
+
+	qos->mux_id = mux_id;
+	qos->real_dev = real_dev;
+	qos->vnd_dev = vnd_dev;
+	qos->tran_num = 0;
+	INIT_LIST_HEAD(&qos->flow_head);
+	INIT_LIST_HEAD(&qos->bearer_head);
+	spin_lock_init(&qos->qos_lock);
+
+	return qos;
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_init);
+
+void qmi_rmnet_qos_exit_pre(void *qos)
+{
+	struct qos_info *qosi = (struct qos_info *)qos;
+	struct rmnet_bearer_map *bearer;
+
+	if (!qos)
+		return;
+
+	list_for_each_entry(bearer, &qosi->bearer_head, list) {
+		bearer->watchdog_quit = true;
+		del_timer_sync(&bearer->watchdog);
+		bearer->ch_switch.timer_quit = true;
+		del_timer_sync(&bearer->ch_switch.guard_timer);
+	}
+
+	list_add(&qosi->list, &qos_cleanup_list);
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);
+
+void qmi_rmnet_qos_exit_post(void)
+{
+	struct qos_info *qos, *tmp;
+
+	synchronize_rcu();
+	list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
+		list_del(&qos->list);
+		qmi_rmnet_clean_flow_list(qos);
+		kfree(qos);
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
+#endif
+
+#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
+static struct workqueue_struct  *rmnet_ps_wq;
+static struct rmnet_powersave_work *rmnet_work;
+static bool rmnet_work_quit;
+static bool rmnet_work_inited;
+static LIST_HEAD(ps_list);
+static u8 ps_bearer_id[32];
+
+struct rmnet_powersave_work {
+	struct delayed_work work;
+	void *port;
+	u64 old_rx_pkts;
+	u64 old_tx_pkts;
+};
+
+void qmi_rmnet_ps_on_notify(void *port)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	list_for_each_entry_rcu(tmp, &ps_list, list)
+		tmp->ps_on_handler(port);
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
+
+void qmi_rmnet_ps_off_notify(void *port)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	list_for_each_entry_rcu(tmp, &ps_list, list)
+		tmp->ps_off_handler(port);
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
+
+int qmi_rmnet_ps_ind_register(void *port,
+			      struct qmi_rmnet_ps_ind *ps_ind)
+{
+
+	if (!port || !ps_ind || !ps_ind->ps_on_handler ||
+	    !ps_ind->ps_off_handler)
+		return -EINVAL;
+
+	list_add_rcu(&ps_ind->list, &ps_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_ind_register);
+
+int qmi_rmnet_ps_ind_deregister(void *port,
+				struct qmi_rmnet_ps_ind *ps_ind)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	if (!port || !ps_ind)
+		return -EINVAL;
+
+	list_for_each_entry_rcu(tmp, &ps_list, list) {
+		if (tmp == ps_ind) {
+			list_del_rcu(&ps_ind->list);
+			goto done;
+		}
+	}
+done:
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);
+
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
+				 u8 *bearer_id)
+{
+	int rc = -EINVAL;
+	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+
+	if (!qmi || !qmi->wda_client)
+		return rc;
+
+	rc = wda_set_powersave_mode(qmi->wda_client, enable, num_bearers,
+				    bearer_id);
+	if (rc < 0) {
+		pr_err("%s() failed set powersave mode[%u], err=%d\n",
+			__func__, enable, rc);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
+
+static void qmi_rmnet_work_restart(void *port)
+{
+	rcu_read_lock();
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
+	rcu_read_unlock();
+}
+
+static void qmi_rmnet_check_stats(struct work_struct *work)
+{
+	struct rmnet_powersave_work *real_work;
+	struct qmi_info *qmi;
+	u64 rxd, txd;
+	u64 rx, tx;
+	bool dl_msg_active;
+
+	real_work = container_of(to_delayed_work(work),
+				 struct rmnet_powersave_work, work);
+
+	if (unlikely(!real_work || !real_work->port))
+		return;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
+	if (unlikely(!qmi))
+		return;
+
+	rmnet_get_packets(real_work->port, &rx, &tx);
+	rxd = rx - real_work->old_rx_pkts;
+	txd = tx - real_work->old_tx_pkts;
+	real_work->old_rx_pkts = rx;
+	real_work->old_tx_pkts = tx;
+
+	dl_msg_active = qmi->dl_msg_active;
+	qmi->dl_msg_active = false;
+
+	if (qmi->ps_enabled) {
+
+		/* Ready to accept grant */
+		qmi->ps_ignore_grant = false;
+
+		/* Register to get QMI DFC and DL marker */
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 0,
+						 0, NULL) < 0)
+			goto end;
+
+		qmi->ps_enabled = false;
+
+		/* Do a query when coming out of powersave */
+		qmi_rmnet_query_flows(qmi);
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_off_notify(real_work->port);
+
+		goto end;
+	}
+
+	if (!rxd && !txd) {
+		/* If no DL message was received and some flow is still
+		 * disabled (likely in RLF), do not enter powersave.
+		 */
+		if (!dl_msg_active &&
+		    !rmnet_all_flows_enabled(real_work->port))
+			goto end;
+
+		/* Deregister to suppress QMI DFC and DL marker */
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 1,
+						 0, NULL) < 0)
+			goto end;
+
+		qmi->ps_enabled = true;
+
+		/* Ignore grant after going into powersave */
+		qmi->ps_ignore_grant = true;
+
+		/* Clear the bit before enabling flow so pending packets
+		 * can trigger the work again
+		 */
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+		rmnet_enable_all_flows(real_work->port);
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_on_notify(real_work->port);
+
+		return;
+	}
+end:
+	rcu_read_lock();
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &real_work->work,
+				   PS_INTERVAL_JF);
+	rcu_read_unlock();
+}
+
+static void qmi_rmnet_check_stats_2(struct work_struct *work)
+{
+	struct rmnet_powersave_work *real_work;
+	struct qmi_info *qmi;
+	u64 rxd, txd;
+	u64 rx, tx;
+	u8 num_bearers;
+	int rc;
+
+	real_work = container_of(to_delayed_work(work),
+				 struct rmnet_powersave_work, work);
+
+	if (unlikely(!real_work->port))
+		return;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
+	if (unlikely(!qmi))
+		return;
+
+	rmnet_get_packets(real_work->port, &rx, &tx);
+	rxd = rx - real_work->old_rx_pkts;
+	txd = tx - real_work->old_tx_pkts;
+	real_work->old_rx_pkts = rx;
+	real_work->old_tx_pkts = tx;
+
+	if (qmi->ps_enabled) {
+
+		/* Ready to accept grant */
+		qmi->ps_ignore_grant = false;
+
+		/* Out of powersave */
+		if (dfc_qmap)
+			rc = dfc_qmap_set_powersave(0, 0, NULL);
+		else
+			rc = qmi_rmnet_set_powersave_mode(real_work->port, 0,
+							  0, NULL);
+		if (rc)
+			goto end;
+
+		qmi->ps_enabled = false;
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_off_notify(real_work->port);
+
+		goto end;
+	}
+
+	if (!rxd && !txd) {
+		qmi->ps_ignore_grant = true;
+		qmi->ps_enabled = true;
+		rmnet_module_hook_aps_data_inactive();
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+
+		smp_mb();
+
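+		/* Collect bearers that are still flow-disabled so they can
+		 * be passed to the powersave request.
+		 */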
+		num_bearers = sizeof(ps_bearer_id);
+		memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
+		rmnet_prepare_ps_bearers(real_work->port, &num_bearers,
+					 ps_bearer_id);
+
+		/* Enter powersave */
+		if (dfc_qmap)
+			dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id);
+		else
+			qmi_rmnet_set_powersave_mode(real_work->port, 1,
+						     num_bearers, ps_bearer_id);
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_on_notify(real_work->port);
+
+		return;
+	}
+end:
+	rcu_read_lock();
+	if (!rmnet_work_quit)
+		queue_delayed_work(rmnet_ps_wq, &real_work->work,
+				   PS_INTERVAL_JF);
+	rcu_read_unlock();
+}
+
+static void qmi_rmnet_work_set_active(void *port, int status)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	if (status)
+		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+	else
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+}
+
+void qmi_rmnet_work_init(void *port)
+{
+	if (rmnet_ps_wq)
+		return;
+
+	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
+				      WQ_CPU_INTENSIVE, 1);
+
+	if (!rmnet_ps_wq)
+		return;
+
+	rmnet_work = kzalloc(sizeof(*rmnet_work), GFP_ATOMIC);
+	if (!rmnet_work) {
+		destroy_workqueue(rmnet_ps_wq);
+		rmnet_ps_wq = NULL;
+		return;
+	}
+
+	if (dfc_ps_ext)
+		INIT_DELAYED_WORK(&rmnet_work->work,
+				     qmi_rmnet_check_stats_2);
+	else
+		INIT_DELAYED_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
+
+	rmnet_work->port = port;
+	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
+			  &rmnet_work->old_tx_pkts);
+
+	rmnet_work_quit = false;
+	qmi_rmnet_work_set_active(rmnet_work->port, 0);
+	rmnet_work_inited = true;
+}
+EXPORT_SYMBOL(qmi_rmnet_work_init);
+
+void qmi_rmnet_work_maybe_restart(void *port, void *desc, struct sk_buff *skb)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi || !rmnet_work_inited))
+		return;
+
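+	/* First activity after idle: accept grants again and kick the
+	 * powersave worker.
+	 */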
+	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) {
+		qmi->ps_ignore_grant = false;
+		qmi_rmnet_work_restart(port);
+		if (desc || skb)
+			rmnet_module_hook_aps_data_active(
+				(struct rmnet_frag_descriptor *)desc, skb);
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);
+
+void qmi_rmnet_work_exit(void *port)
+{
+	if (!rmnet_ps_wq || !rmnet_work)
+		return;
+
+	rmnet_work_quit = true;
+	synchronize_rcu();
+
+	rmnet_work_inited = false;
+	cancel_delayed_work_sync(&rmnet_work->work);
+	destroy_workqueue(rmnet_ps_wq);
+	qmi_rmnet_work_set_active(port, 0);
+	rmnet_ps_wq = NULL;
+	kfree(rmnet_work);
+	rmnet_work = NULL;
+}
+EXPORT_SYMBOL(qmi_rmnet_work_exit);
+
+void qmi_rmnet_set_dl_msg_active(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	qmi->dl_msg_active = true;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);
+
+void qmi_rmnet_flush_ps_wq(void)
+{
+	if (rmnet_ps_wq)
+		flush_workqueue(rmnet_ps_wq);
+}
+
+bool qmi_rmnet_ignore_grant(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return false;
+
+	return qmi->ps_ignore_grant;
+}
+EXPORT_SYMBOL(qmi_rmnet_ignore_grant);
+#endif

+ 180 - 0
qcom/opensource/datarmnet/core/qmi_rmnet.h

@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_RMNET_H
+#define _QMI_RMNET_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#define CONFIG_QTI_QMI_RMNET 1
+#define CONFIG_QTI_QMI_DFC  1
+#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
+
+struct qmi_rmnet_ps_ind {
+	void (*ps_on_handler)(void *port);
+	void (*ps_off_handler)(void *port);
+	struct list_head list;
+};
+
+
+#ifdef CONFIG_QTI_QMI_RMNET
+void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
+int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+			  int attr_len);
+void qmi_rmnet_enable_all_flows(struct net_device *dev);
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
+void qmi_rmnet_prepare_ps_bearers(struct net_device *dev, u8 *num_bearers,
+				  u8 *bearer_id);
+#else
+static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
+{
+}
+
+static inline int
+qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
+		      int attr_len)
+{
+	return 0;
+}
+
+static inline void
+qmi_rmnet_enable_all_flows(struct net_device *dev)
+{
+}
+
+static inline bool
+qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+	return true;
+}
+
+static inline void qmi_rmnet_prepare_ps_bearers(struct net_device *dev,
+						u8 *num_bearers, u8 *bearer_id)
+{
+	if (num_bearers)
+		*num_bearers = 0;
+}
+
+#endif
+
+#ifdef CONFIG_QTI_QMI_DFC
+void *qmi_rmnet_qos_init(struct net_device *real_dev,
+			 struct net_device *vnd_dev, u8 mux_id);
+void qmi_rmnet_qos_exit_pre(void *qos);
+void qmi_rmnet_qos_exit_post(void);
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+			      bool *drop, bool *is_low_latency);
+void qmi_rmnet_burst_fc_check(struct net_device *dev,
+			      int ip_type, u32 mark, unsigned int len);
+int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
+#else
+static inline void *
+qmi_rmnet_qos_init(struct net_device *real_dev,
+		   struct net_device *vnd_dev, u8 mux_id)
+{
+	return NULL;
+}
+
+static inline void qmi_rmnet_qos_exit_pre(void *qos)
+{
+}
+
+static inline void qmi_rmnet_qos_exit_post(void)
+{
+}
+
+static inline bool qmi_rmnet_get_flow_state(struct net_device *dev,
+					    struct sk_buff *skb,
+					    bool *drop,
+					    bool *is_low_latency)
+{
+	return false;
+}
+
+static inline void
+qmi_rmnet_burst_fc_check(struct net_device *dev,
+			 int ip_type, u32 mark, unsigned int len)
+{
+}
+
+static inline int qmi_rmnet_get_queue(struct net_device *dev,
+				       struct sk_buff *skb)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable, u8 num_bearers,
+				 u8 *bearer_id);
+void qmi_rmnet_work_init(void *port);
+void qmi_rmnet_work_exit(void *port);
+void qmi_rmnet_work_maybe_restart(void *port, void *desc, struct sk_buff *skb);
+void qmi_rmnet_set_dl_msg_active(void *port);
+bool qmi_rmnet_ignore_grant(void *port);
+
+int qmi_rmnet_ps_ind_register(void *port,
+			      struct qmi_rmnet_ps_ind *ps_ind);
+int qmi_rmnet_ps_ind_deregister(void *port,
+				struct qmi_rmnet_ps_ind *ps_ind);
+void qmi_rmnet_ps_off_notify(void *port);
+void qmi_rmnet_ps_on_notify(void *port);
+
+#else
+static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable,
+					       u8 num_bearers, u8 *bearer_id)
+{
+	return 0;
+}
+static inline void qmi_rmnet_work_init(void *port)
+{
+}
+static inline void qmi_rmnet_work_exit(void *port)
+{
+}
+static inline void qmi_rmnet_work_maybe_restart(void *port, void *desc,
+						struct sk_buff *skb)
+{
+}
+static inline void qmi_rmnet_set_dl_msg_active(void *port)
+{
+}
+static inline bool qmi_rmnet_ignore_grant(void *port)
+{
+	return false;
+}
+
+static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
+				     struct qmi_rmnet_ps_ind *ps_ind)
+{
+	return 0;
+}
+static inline int qmi_rmnet_ps_ind_deregister(struct rmnet_port *port,
+				       struct qmi_rmnet_ps_ind *ps_ind)
+{
+	return 0;
+}
+
+static inline void qmi_rmnet_ps_off_notify(struct rmnet_port *port)
+{
+}
+
+static inline void qmi_rmnet_ps_on_notify(struct rmnet_port *port)
+{
+}
+#endif
+#endif /*_QMI_RMNET_H*/

+ 300 - 0
qcom/opensource/datarmnet/core/qmi_rmnet_i.h

@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_QMI_I_H
+#define _RMNET_QMI_I_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <uapi/linux/rtnetlink.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define MAX_MQ_NUM 16
+#define MAX_CLIENT_NUM 2
+#define MAX_FLOW_NUM 32
+#define DEFAULT_GRANT 1
+#define DEFAULT_CALL_GRANT 20480
+#define DFC_MAX_BEARERS_V01 16
+#define DEFAULT_MQ_NUM 0
+#define ACK_MQ_OFFSET (MAX_MQ_NUM - 1)
+#define INVALID_MQ 0xFF
+
+#define DFC_MODE_SA 4
+#define PS_MAX_BEARERS 32
+
+#define CONFIG_QTI_QMI_RMNET 1
+#define CONFIG_QTI_QMI_DFC  1
+#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
+
+extern int dfc_mode;
+extern int dfc_qmap;
+
+struct qos_info;
+
+enum {
+	RMNET_CH_DEFAULT,
+	RMNET_CH_LL,
+	RMNET_CH_MAX,
+	RMNET_CH_CTL = 0xFF
+};
+
+enum rmnet_ch_switch_state {
+	CH_SWITCH_NONE,
+	CH_SWITCH_STARTED,
+	CH_SWITCH_ACKED,
+	CH_SWITCH_FAILED_RETRY
+};
+
+struct rmnet_ch_switch {
+	u8 current_ch;
+	u8 switch_to_ch;
+	u8 retry_left;
+	u8 status_code;
+	bool auto_switched;
+	enum rmnet_ch_switch_state state;
+	__be32 switch_txid;
+	u32 flags;
+	bool timer_quit;
+	struct timer_list guard_timer;
+	u32 nl_pid;
+	u32 nl_seq;
+};
+
+struct rmnet_bearer_map {
+	struct list_head list;
+	u8 bearer_id;
+	int flow_ref;
+	u32 grant_size;
+	u32 grant_thresh;
+	u16 seq;
+	u8  ack_req;
+	u32 last_grant;
+	u16 last_seq;
+	u32 bytes_in_flight;
+	u32 last_adjusted_grant;
+	bool tcp_bidir;
+	bool rat_switch;
+	bool tx_off;
+	u32 ack_txid;
+	u32 mq_idx;
+	u32 ack_mq_idx;
+	struct qos_info *qos;
+	struct timer_list watchdog;
+	bool watchdog_started;
+	bool watchdog_quit;
+	u32 watchdog_expire_cnt;
+	struct rmnet_ch_switch ch_switch;
+};
+
+struct rmnet_flow_map {
+	struct list_head list;
+	u8 bearer_id;
+	u32 flow_id;
+	int ip_type;
+	u32 mq_idx;
+	struct rmnet_bearer_map *bearer;
+};
+
+struct svc_info {
+	u32 instance;
+	u32 ep_type;
+	u32 iface_id;
+};
+
+struct mq_map {
+	struct rmnet_bearer_map *bearer;
+	bool is_ll_ch;
+	bool drop_on_remove;
+};
+
+struct qos_info {
+	struct list_head list;
+	u8 mux_id;
+	struct net_device *real_dev;
+	struct net_device *vnd_dev;
+	struct list_head flow_head;
+	struct list_head bearer_head;
+	struct mq_map mq[MAX_MQ_NUM];
+	u32 tran_num;
+	spinlock_t qos_lock;
+	struct rmnet_bearer_map *removed_bearer;
+};
+
+struct qmi_info {
+	int flag;
+	void *wda_client;
+	void *wda_pending;
+	void *dfc_clients[MAX_CLIENT_NUM];
+	void *dfc_pending[MAX_CLIENT_NUM];
+	bool dfc_client_exiting[MAX_CLIENT_NUM];
+	unsigned long ps_work_active;
+	bool ps_enabled;
+	bool dl_msg_active;
+	bool ps_ignore_grant;
+	int ps_ext;
+};
+
+enum data_ep_type_enum_v01 {
+	DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN,
+	DATA_EP_TYPE_RESERVED_V01 = 0x00,
+	DATA_EP_TYPE_HSIC_V01 = 0x01,
+	DATA_EP_TYPE_HSUSB_V01 = 0x02,
+	DATA_EP_TYPE_PCIE_V01 = 0x03,
+	DATA_EP_TYPE_EMBEDDED_V01 = 0x04,
+	DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX
+};
+
+struct data_ep_id_type_v01 {
+
+	enum data_ep_type_enum_v01 ep_type;
+	u32 iface_id;
+};
+
+extern struct qmi_elem_info data_ep_id_type_v01_ei[];
+
+void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
+
+#ifdef CONFIG_QTI_QMI_DFC
+struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos_info,
+		       u32 flow_id, int ip_type);
+
+struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);
+
+unsigned int qmi_rmnet_grant_per(unsigned int grant);
+
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+			struct qmi_info *qmi);
+
+void dfc_qmi_client_exit(void *dfc_data);
+
+void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
+			 int ip_type, u32 mark, unsigned int len);
+
+int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable);
+
+void dfc_qmi_query_flow(void *dfc_data);
+
+int dfc_bearer_flow_ctl(struct net_device *dev,
+			struct rmnet_bearer_map *bearer,
+			struct qos_info *qos);
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+			 struct qmi_info *qmi);
+
+void dfc_qmap_client_exit(void *dfc_data);
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
+
+struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
+						    u8 bearer_id);
+
+void qmi_rmnet_watchdog_add(struct rmnet_bearer_map *bearer);
+
+void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer);
+
+int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen);
+void rmnet_ll_guard_fn(struct timer_list *t);
+void rmnet_ll_wq_init(void);
+void rmnet_ll_wq_exit(void);
+#else
+static inline struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos_info,
+		       uint32_t flow_id, int ip_type)
+{
+	return NULL;
+}
+
+static inline struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
+{
+	return NULL;
+}
+
+static inline int
+dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+		    struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void dfc_qmi_client_exit(void *dfc_data)
+{
+}
+
+static inline int
+dfc_bearer_flow_ctl(struct net_device *dev,
+		    struct rmnet_bearer_map *bearer,
+		    struct qos_info *qos)
+{
+	return 0;
+}
+
+static inline int
+dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+		     struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void dfc_qmap_client_exit(void *dfc_data)
+{
+}
+
+static inline void qmi_rmnet_watchdog_remove(struct rmnet_bearer_map *bearer)
+{
+}
+
+static inline int rmnet_ll_switch(struct net_device *dev,
+				  struct tcmsg *tcm, int attrlen)
+{
+	return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
+void wda_qmi_client_exit(void *wda_data);
+int wda_set_powersave_mode(void *wda_data, u8 enable, u8 num_bearers,
+			   u8 *bearer_id);
+void qmi_rmnet_flush_ps_wq(void);
+void wda_qmi_client_release(void *wda_data);
+int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id);
+#else
+static inline int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void wda_qmi_client_exit(void *wda_data)
+{
+}
+
+static inline int wda_set_powersave_mode(void *wda_data, u8 enable,
+					 u8 num_bearers, u8 *bearer_id)
+{
+	return -EINVAL;
+}
+static inline void qmi_rmnet_flush_ps_wq(void)
+{
+}
+static inline void wda_qmi_client_release(void *wda_data)
+{
+}
+#endif
+#endif /*_RMNET_QMI_I_H*/

+ 891 - 0
qcom/opensource/datarmnet/core/rmnet_config.c

@@ -0,0 +1,891 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET configuration engine
+ *
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_vnd.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_descriptor.h"
+#include "rmnet_ll.h"
+#include "rmnet_genl.h"
+#include "rmnet_qmi.h"
+#include "qmi_rmnet.h"
+#define CONFIG_QTI_QMI_RMNET 1
+#define CONFIG_QTI_QMI_DFC  1
+#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
+
+#define QMAP_SHS_MASK 0xFF
+#define QMAP_SHS_PKT_LIMIT 200
+
+/* Locking scheme -
+ * The shared resource which needs to be protected is realdev->rx_handler_data.
+ * For the writer path, this is using rtnl_lock(). The writer paths are
+ * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
+ * paths are already called with rtnl_lock() acquired. There is also an
+ * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
+ * dereference here, we will need to use rtnl_dereference(). Dev list writing
+ * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
+ * For the reader path, the real_dev->rx_handler_data is read in the TX / RX
+ * path. We only need rcu_read_lock() for these scenarios. In these cases,
+ * the rcu_read_lock() is held in __dev_queue_xmit() and
+ * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
+ * to get the relevant information. For dev list reading, we again acquire
+ * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
+ * We also use unregister_netdevice_many() to free all rmnet devices in
+ * rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free
+ * in the same context.
+ */
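+
+/* Reader-side sketch, shown only to illustrate the scheme above (it is not
+ * called anywhere in this file). It assumes the caller runs in the TX / RX
+ * path, where the core stack already holds rcu_read_lock():
+ *
+ *	struct rmnet_port *port;
+ *
+ *	port = rcu_dereference_rtnl(real_dev->rx_handler_data);
+ *	if (!port)
+ *		return RX_HANDLER_PASS;
+ *
+ * Writer paths instead run under rtnl_lock() and use rtnl_dereference(), as
+ * rmnet_get_port_rtnl() below does.
+ */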
+
+/* Local Definitions and Declarations */
+
+enum {
+	IFLA_RMNET_DFC_QOS = __IFLA_RMNET_MAX,
+	IFLA_RMNET_UL_AGG_PARAMS,
+	IFLA_RMNET_UL_AGG_STATE_ID,
+	__IFLA_RMNET_EXT_MAX,
+};
+
+static const struct nla_policy rmnet_policy[__IFLA_RMNET_EXT_MAX] = {
+	[IFLA_RMNET_MUX_ID] = {
+		.type = NLA_U16
+	},
+	[IFLA_RMNET_FLAGS] = {
+		.len = sizeof(struct ifla_rmnet_flags)
+	},
+	[IFLA_RMNET_DFC_QOS] = {
+		.len = sizeof(struct tcmsg)
+	},
+	[IFLA_RMNET_UL_AGG_PARAMS] = {
+		.len = sizeof(struct rmnet_egress_agg_params)
+	},
+	[IFLA_RMNET_UL_AGG_STATE_ID] = {
+		.type = NLA_U8
+	},
+};
+
+int rmnet_is_real_dev_registered(const struct net_device *real_dev)
+{
+	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
+}
+EXPORT_SYMBOL(rmnet_is_real_dev_registered);
+
+/* Needs rtnl lock */
+static struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev)
+{
+	return rtnl_dereference(real_dev->rx_handler_data);
+}
+
+static int rmnet_unregister_real_device(struct net_device *real_dev,
+					struct rmnet_port *port)
+{
+	if (port->nr_rmnet_devs)
+		return -EINVAL;
+
+	netdev_rx_handler_unregister(real_dev);
+
+	rmnet_map_cmd_exit(port);
+	rmnet_map_tx_aggregate_exit(port);
+
+	rmnet_descriptor_deinit(port);
+
+	kfree(port);
+
+	/* release reference on real_dev */
+	dev_put(real_dev);
+
+	netdev_dbg(real_dev, "Removed from rmnet\n");
+	return 0;
+}
+
+static int rmnet_register_real_device(struct net_device *real_dev)
+{
+	struct rmnet_port *port;
+	int rc, entry;
+
+	ASSERT_RTNL();
+
+	if (rmnet_is_real_dev_registered(real_dev))
+		return 0;
+
+	port = kzalloc(sizeof(*port), GFP_ATOMIC);
+	if (!port)
+		return -ENOMEM;
+
+	port->dev = real_dev;
+	port->phy_shs_cfg.config = RMNET_SHS_NO_DLMKR | RMNET_SHS_NO_PSH |
+				   RMNET_SHS_STMP_ALL;
+	port->phy_shs_cfg.map_mask = QMAP_SHS_MASK;
+	port->phy_shs_cfg.max_pkts = QMAP_SHS_PKT_LIMIT;
+
+	rmnet_map_tx_aggregate_init(port);
+	rmnet_map_cmd_init(port);
+
+	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+
+	rc = rmnet_descriptor_init(port);
+	if (rc)
+		goto err;
+
+	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
+	if (rc) {
+		rc = -EBUSY;
+		goto err;
+	}
+	/* hold on to real dev for MAP data */
+	dev_hold(real_dev);
+
+	netdev_dbg(real_dev, "registered with rmnet\n");
+	return 0;
+err:
+	rmnet_descriptor_deinit(port);
+	kfree(port);
+	return rc;
+}
+
+static void rmnet_unregister_bridge(struct net_device *dev,
+				    struct rmnet_port *port)
+{
+	struct rmnet_port *bridge_port;
+	struct net_device *bridge_dev;
+
+	if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+		return;
+
+	/* bridge slave handling */
+	if (!port->nr_rmnet_devs) {
+		bridge_dev = port->bridge_ep;
+
+		bridge_port = rmnet_get_port_rtnl(bridge_dev);
+		bridge_port->bridge_ep = NULL;
+		bridge_port->rmnet_mode = RMNET_EPMODE_VND;
+	} else {
+		bridge_dev = port->bridge_ep;
+
+		bridge_port = rmnet_get_port_rtnl(bridge_dev);
+		rmnet_unregister_real_device(bridge_dev, bridge_port);
+	}
+}
+
+static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[],
+			 struct netlink_ext_ack *extack)
+{
+	struct net_device *real_dev;
+	int mode = RMNET_EPMODE_VND;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	u32 data_format;
+	int err = 0;
+	u16 mux_id;
+
+	real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+	if (!real_dev || !dev)
+		return -ENODEV;
+
+	if (!data[IFLA_RMNET_MUX_ID])
+		return -EINVAL;
+
+	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+	if (!ep)
+		return -ENOMEM;
+
+	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+
+	err = rmnet_register_real_device(real_dev);
+	if (err)
+		goto err0;
+
+	port = rmnet_get_port_rtnl(real_dev);
+	err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep);
+	if (err)
+		goto err1;
+
+	port->rmnet_mode = mode;
+
+	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+	if (data[IFLA_RMNET_FLAGS]) {
+		struct ifla_rmnet_flags *flags;
+
+		flags = nla_data(data[IFLA_RMNET_FLAGS]);
+		data_format = flags->flags & flags->mask;
+		netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+		if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+			data_format |= RMNET_INGRESS_FORMAT_PS;
+		port->data_format = data_format;
+	}
+
+	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
+		struct rmnet_egress_agg_params *agg_params;
+		u8 state = RMNET_DEFAULT_AGG_STATE;
+
+		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
+		if (data[IFLA_RMNET_UL_AGG_STATE_ID])
+			state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);
+
+		rmnet_map_update_ul_agg_config(&port->agg_state[state],
+					       agg_params->agg_size,
+					       agg_params->agg_count,
+					       agg_params->agg_features,
+					       agg_params->agg_time);
+	}
+
+	return 0;
+
+err1:
+	rmnet_unregister_real_device(real_dev, port);
+err0:
+	kfree(ep);
+	return err;
+}
+
+static void rmnet_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	u8 mux_id;
+
+	real_dev = priv->real_dev;
+
+	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+		return;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	mux_id = rmnet_vnd_get_mux(dev);
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (ep) {
+		hlist_del_init_rcu(&ep->hlnode);
+		synchronize_rcu();
+		rmnet_unregister_bridge(dev, port);
+		rmnet_vnd_dellink(mux_id, port, ep);
+		kfree(ep);
+	}
+
+	if (!port->nr_rmnet_devs)
+		qmi_rmnet_qmi_exit(port->qmi_info, port);
+
+	unregister_netdevice(dev);
+
+	qmi_rmnet_qos_exit_post();
+
+	rmnet_unregister_real_device(real_dev, port);
+}
+
+static void rmnet_force_unassociate_device(struct net_device *dev)
+{
+	struct net_device *real_dev = dev;
+	struct hlist_node *tmp_ep;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	unsigned long bkt_ep;
+	LIST_HEAD(list);
+	HLIST_HEAD(cleanup_list);
+
+	if (!rmnet_is_real_dev_registered(real_dev))
+		return;
+
+	ASSERT_RTNL();
+
+	port = rmnet_get_port_rtnl(dev);
+	qmi_rmnet_qmi_exit(port->qmi_info, port);
+
+	rmnet_unregister_bridge(dev, port);
+
+	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+		unregister_netdevice_queue(ep->egress_dev, &list);
+		rmnet_vnd_dellink(ep->mux_id, port, ep);
+
+		hlist_del_init_rcu(&ep->hlnode);
+		hlist_add_head(&ep->hlnode, &cleanup_list);
+	}
+
+	synchronize_rcu();
+
+	hlist_for_each_entry_safe(ep, tmp_ep, &cleanup_list, hlnode) {
+		hlist_del(&ep->hlnode);
+		kfree(ep);
+	}
+
+	/* Unregister devices in this context before freeing the port.
+	 * If this API becomes non-context, their order should switch.
+	 */
+	unregister_netdevice_many(&list);
+
+	qmi_rmnet_qos_exit_post();
+
+	rmnet_unregister_real_device(real_dev, port);
+}
+
+static int rmnet_config_notify_cb(struct notifier_block *nb,
+				  unsigned long event, void *data)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(data);
+	int rc;
+
+	if (!dev)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		if (dev->rtnl_link_ops == &rmnet_link_ops) {
+			rc = netdev_rx_handler_register(dev,
+							rmnet_rx_priv_handler,
+							NULL);
+
+			if (rc)
+				return NOTIFY_BAD;
+		}
+
+		break;
+	case NETDEV_UNREGISTER:
+		if (dev->rtnl_link_ops == &rmnet_link_ops) {
+			netdev_rx_handler_unregister(dev);
+			break;
+		}
+
+		netdev_dbg(dev, "Kernel unregister\n");
+		rmnet_force_unassociate_device(dev);
+		break;
+	case NETDEV_DOWN:
+		rmnet_vnd_reset_mac_addr(dev);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rmnet_dev_notifier __read_mostly = {
+	.notifier_call = rmnet_config_notify_cb,
+};
+
+static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+			       struct netlink_ext_ack *extack)
+{
+	struct rmnet_egress_agg_params *agg_params;
+	u16 mux_id;
+
+	if (!data)
+		return -EINVAL;
+
+	if (data[IFLA_RMNET_MUX_ID]) {
+		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+		if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+			return -ERANGE;
+	}
+
+	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
+		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
+		if (agg_params->agg_time < 1000000)
+			return -EINVAL;
+
+		if (data[IFLA_RMNET_UL_AGG_STATE_ID]) {
+			u8 state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);
+
+			if (state >= RMNET_MAX_AGG_STATE)
+				return -ERANGE;
+		}
+	}
+
+	return 0;
+}
+
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	u16 mux_id;
+	u32 data_format;
+	int rc = 0;
+
+	real_dev = __dev_get_by_index(dev_net(dev),
+				      nla_get_u32(tb[IFLA_LINK]));
+
+	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+		return -ENODEV;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	if (data[IFLA_RMNET_MUX_ID]) {
+		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+		ep = rmnet_get_endpoint(port, priv->mux_id);
+		if (!ep)
+			return -ENODEV;
+
+		hlist_del_init_rcu(&ep->hlnode);
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+		ep->mux_id = mux_id;
+		priv->mux_id = mux_id;
+	}
+
+	if (data[IFLA_RMNET_FLAGS]) {
+		struct ifla_rmnet_flags *flags;
+
+		flags = nla_data(data[IFLA_RMNET_FLAGS]);
+		data_format = flags->flags & flags->mask;
+		if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+			data_format |= RMNET_INGRESS_FORMAT_PS;
+		port->data_format = data_format;
+	}
+
+	if (data[IFLA_RMNET_DFC_QOS]) {
+		struct nlattr *qos = data[IFLA_RMNET_DFC_QOS];
+		struct tcmsg *tcm;
+
+		tcm = nla_data(qos);
+		rc = qmi_rmnet_change_link(dev, port, tcm, nla_len(qos));
+	}
+
+	if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
+		struct rmnet_egress_agg_params *agg_params;
+		u8 state = RMNET_DEFAULT_AGG_STATE;
+
+		agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
+		if (data[IFLA_RMNET_UL_AGG_STATE_ID])
+			state = nla_get_u8(data[IFLA_RMNET_UL_AGG_STATE_ID]);
+
+		rmnet_map_update_ul_agg_config(&port->agg_state[state],
+					       agg_params->agg_size,
+					       agg_params->agg_count,
+					       agg_params->agg_features,
+					       agg_params->agg_time);
+	}
+
+	return rc;
+}
+
+static size_t rmnet_get_size(const struct net_device *dev)
+{
+	return
+		/* IFLA_RMNET_MUX_ID */
+		nla_total_size(2) +
+		/* IFLA_RMNET_FLAGS */
+		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
+		/* IFLA_RMNET_DFC_QOS */
+		nla_total_size(sizeof(struct tcmsg)) +
+		/* IFLA_RMNET_UL_AGG_PARAMS */
+		nla_total_size(sizeof(struct rmnet_egress_agg_params));
+}
+
+static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct ifla_rmnet_flags f;
+	struct rmnet_port *port = NULL;
+
+	real_dev = priv->real_dev;
+
+	if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
+		goto nla_put_failure;
+
+	if (rmnet_is_real_dev_registered(real_dev)) {
+		port = rmnet_get_port_rtnl(real_dev);
+		f.flags = port->data_format;
+	} else {
+		f.flags = 0;
+	}
+
+	f.mask  = ~0;
+
+	if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
+		goto nla_put_failure;
+
+	if (port) {
+		struct rmnet_aggregation_state *state;
+
+		/* Only report default for now. The entire message type
+		 * would need to change to get both states in there (nested
+		 * messages, etc), since they both have the same NLA type...
+		 */
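+		/* A nested layout could, in principle, carry both states. The
+		 * sketch below is purely hypothetical (reusing the agg state
+		 * enum values as nested attribute types is not part of the
+		 * current UAPI):
+		 *
+		 *	struct nlattr *nest;
+		 *
+		 *	nest = nla_nest_start(skb, IFLA_RMNET_UL_AGG_PARAMS);
+		 *	nla_put(skb, RMNET_DEFAULT_AGG_STATE,
+		 *		sizeof(state->params), &state->params);
+		 *	nla_nest_end(skb, nest);
+		 */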
+		state = &port->agg_state[RMNET_DEFAULT_AGG_STATE];
+		if (nla_put(skb, IFLA_RMNET_UL_AGG_PARAMS,
+			    sizeof(state->params),
+			    &state->params))
+			goto nla_put_failure;
+	}
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+	.kind		= "rmnet",
+	.maxtype	= __IFLA_RMNET_EXT_MAX - 1,
+	.priv_size	= sizeof(struct rmnet_priv),
+	.setup		= rmnet_vnd_setup,
+	.validate	= rmnet_rtnl_validate,
+	.newlink	= rmnet_newlink,
+	.dellink	= rmnet_dellink,
+	.get_size	= rmnet_get_size,
+	.changelink     = rmnet_changelink,
+	.policy		= rmnet_policy,
+	.fill_info	= rmnet_fill_info,
+};
+
+/* Needs either rcu_read_lock() or rtnl lock */
+struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
+{
+	if (rmnet_is_real_dev_registered(real_dev))
+		return rcu_dereference_rtnl(real_dev->rx_handler_data);
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_port);
+
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
+{
+	struct rmnet_endpoint *ep;
+
+	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
+		if (ep->mux_id == mux_id)
+			return ep;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_endpoint);
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct net_device *real_dev = priv->real_dev;
+	struct rmnet_port *port, *slave_port;
+	int err;
+
+	port = rmnet_get_port(real_dev);
+
+	/* If there is more than one rmnet dev attached, it's probably being
+	 * used for muxing. Skip the bridging in that case.
+	 */
+	if (port->nr_rmnet_devs > 1)
+		return -EINVAL;
+
+	if (rmnet_is_real_dev_registered(slave_dev))
+		return -EBUSY;
+
+	err = rmnet_register_real_device(slave_dev);
+	if (err)
+		return -EBUSY;
+
+	slave_port = rmnet_get_port(slave_dev);
+	slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	slave_port->bridge_ep = real_dev;
+
+	port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+	port->bridge_ep = slave_dev;
+
+	netdev_dbg(slave_dev, "registered with rmnet as slave\n");
+	return 0;
+}
+
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	struct net_device *real_dev = priv->real_dev;
+	struct rmnet_port *port, *slave_port;
+
+	port = rmnet_get_port(real_dev);
+	port->rmnet_mode = RMNET_EPMODE_VND;
+	port->bridge_ep = NULL;
+
+	slave_port = rmnet_get_port(slave_dev);
+	rmnet_unregister_real_device(slave_dev, slave_port);
+
+	netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+	return 0;
+}
+
+void *rmnet_get_qmi_pt(void *port)
+{
+	if (port)
+		return ((struct rmnet_port *)port)->qmi_info;
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_qmi_pt);
+
+void *rmnet_get_qos_pt(struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	if (dev) {
+		priv = netdev_priv(dev);
+		return rcu_dereference(priv->qos_info);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_qos_pt);
+
+void *rmnet_get_rmnet_port(struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	if (dev) {
+		priv = netdev_priv(dev);
+		return (void *)rmnet_get_port(priv->real_dev);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_rmnet_port);
+
+struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
+{
+	struct rmnet_endpoint *ep;
+
+	if (port) {
+		ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
+		if (ep)
+			return ep->egress_dev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_rmnet_dev);
+
+void rmnet_reset_qmi_pt(void *port)
+{
+	if (port)
+		((struct rmnet_port *)port)->qmi_info = NULL;
+}
+EXPORT_SYMBOL(rmnet_reset_qmi_pt);
+
+void rmnet_init_qmi_pt(void *port, void *qmi)
+{
+	if (port)
+		((struct rmnet_port *)port)->qmi_info = qmi;
+}
+EXPORT_SYMBOL(rmnet_init_qmi_pt);
+
+void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+{
+	struct net_device *dev;
+	struct rmnet_priv *priv;
+	struct rmnet_pcpu_stats *ps;
+	unsigned int cpu, start;
+
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+
+	if (!port || !tx || !rx)
+		return;
+
+	*tx = 0;
+	*rx = 0;
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep, bkt, ep,
+			  hlnode) {
+		dev = ep->egress_dev;
+		if (!dev)
+			continue;
+		priv = netdev_priv(dev);
+		for_each_possible_cpu(cpu) {
+			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
+			do {
+				start = u64_stats_fetch_begin_irq(&ps->syncp);
+				*tx += ps->stats.tx_pkts;
+				*rx += ps->stats.rx_pkts;
+			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
+		}
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(rmnet_get_packets);
+
+void  rmnet_set_powersave_format(void *port)
+{
+	if (!port)
+		return;
+	((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
+}
+EXPORT_SYMBOL(rmnet_set_powersave_format);
+
+void  rmnet_clear_powersave_format(void *port)
+{
+	if (!port)
+		return;
+	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
+}
+EXPORT_SYMBOL(rmnet_clear_powersave_format);
+
+void rmnet_enable_all_flows(void *port)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+
+	if (unlikely(!port))
+		return;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		qmi_rmnet_enable_all_flows(ep->egress_dev);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(rmnet_enable_all_flows);
+
+bool rmnet_all_flows_enabled(void *port)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+	bool ret = true;
+
+	if (unlikely(!port))
+		return true;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
+			ret = false;
+			goto out;
+		}
+	}
+out:
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(rmnet_all_flows_enabled);
+
+void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+	u8 current_num_bearers = 0;
+	u8 number_bearers_left = 0;
+	u8 num_bearers_in_out;
+
+	if (unlikely(!port || !num_bearers))
+		return;
+
+	number_bearers_left = *num_bearers;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		num_bearers_in_out = number_bearers_left;
+		qmi_rmnet_prepare_ps_bearers(ep->egress_dev,
+					     &num_bearers_in_out,
+					     bearer_id ? bearer_id +
+						current_num_bearers : NULL);
+		current_num_bearers += num_bearers_in_out;
+		number_bearers_left -= num_bearers_in_out;
+	}
+	rcu_read_unlock();
+
+	*num_bearers = current_num_bearers;
+}
+EXPORT_SYMBOL(rmnet_prepare_ps_bearers);
+
+int rmnet_get_powersave_notif(void *port)
+{
+	if (!port)
+		return 0;
+	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
+}
+EXPORT_SYMBOL(rmnet_get_powersave_notif);
+
+struct net_device *rmnet_get_real_dev(void *port)
+{
+	if (port)
+		return ((struct rmnet_port *)port)->dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_real_dev);
+
+int rmnet_get_dlmarker_info(void *port)
+{
+	if (!port)
+		return 0;
+
+	return ((struct rmnet_port *)port)->data_format &
+		(RMNET_INGRESS_FORMAT_DL_MARKER_V1 |
+		RMNET_INGRESS_FORMAT_DL_MARKER_V2);
+}
+EXPORT_SYMBOL(rmnet_get_dlmarker_info);
+
+/* Startup/Shutdown */
+
+static int __init rmnet_init(void)
+{
+	int rc;
+
+	rc = register_netdevice_notifier(&rmnet_dev_notifier);
+	if (rc != 0)
+		return rc;
+
+	rc = rtnl_link_register(&rmnet_link_ops);
+	if (rc != 0) {
+		unregister_netdevice_notifier(&rmnet_dev_notifier);
+		return rc;
+	}
+
+	rc = rmnet_ll_init();
+	if (rc != 0) {
+		unregister_netdevice_notifier(&rmnet_dev_notifier);
+		rtnl_link_unregister(&rmnet_link_ops);
+		return rc;
+	}
+
+	rmnet_core_genl_init();
+
+	try_module_get(THIS_MODULE);
+	return rc;
+}
+
+static void __exit rmnet_exit(void)
+{
+	unregister_netdevice_notifier(&rmnet_dev_notifier);
+	rtnl_link_unregister(&rmnet_link_ops);
+	rmnet_ll_exit();
+	rmnet_core_genl_deinit();
+
+	module_put(THIS_MODULE);
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_LICENSE("GPL v2");

+ 257 - 0
qcom/opensource/datarmnet/core/rmnet_config.h

@@ -0,0 +1,257 @@
+/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <net/gro_cells.h>
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#define RMNET_MAX_LOGICAL_EP 255
+#define RMNET_MAX_VEID 16
+
+#define RMNET_SHS_STMP_ALL BIT(0)
+#define RMNET_SHS_NO_PSH BIT(1)
+#define RMNET_SHS_NO_DLMKR BIT(2)
+
+#define RMNET_LLM(prio) ((prio) == 0xDA001A) /* qmipriod */
+
+#define RMNET_APS_MAJOR 0x9B6D
+#define RMNET_APS_LLC_MASK 0x0100
+#define RMNET_APS_LLB_MASK 0x0200
+
+#define RMNET_APS_LLC(prio) \
+	(((prio) >> 16 == RMNET_APS_MAJOR) && ((prio) & RMNET_APS_LLC_MASK))
+
+#define RMNET_APS_LLB(prio) \
+	(((prio) >> 16 == RMNET_APS_MAJOR) && ((prio) & RMNET_APS_LLB_MASK))
+
+struct rmnet_shs_clnt_s {
+	u16 config;
+	u16 map_mask;
+	u16 max_pkts;
+	union {
+		struct rmnet_port *port;
+	} info;
+};
+
+struct rmnet_endpoint {
+	u8 mux_id;
+	struct net_device *egress_dev;
+	struct hlist_node hlnode;
+};
+
+struct rmnet_agg_stats {
+	u64 ul_agg_reuse;
+	u64 ul_agg_alloc;
+};
+
+struct rmnet_port_priv_stats {
+	u64 dl_hdr_last_qmap_vers;
+	u64 dl_hdr_last_ep_id;
+	u64 dl_hdr_last_trans_id;
+	u64 dl_hdr_last_seq;
+	u64 dl_hdr_last_bytes;
+	u64 dl_hdr_last_pkts;
+	u64 dl_hdr_last_flows;
+	u64 dl_hdr_count;
+	u64 dl_hdr_total_bytes;
+	u64 dl_hdr_total_pkts;
+	u64 dl_trl_last_seq;
+	u64 dl_trl_count;
+	struct rmnet_agg_stats agg;
+	u64 dl_chain_stat[7];
+	u64 dl_frag_stat_1;
+	u64 dl_frag_stat[5];
+	u64 pb_marker_count;
+	u64 pb_marker_seq;
+};
+
+struct rmnet_egress_agg_params {
+	u16 agg_size;
+	u8 agg_count;
+	u8 agg_features;
+	u32 agg_time;
+};
+
+enum {
+	RMNET_DEFAULT_AGG_STATE,
+	RMNET_LL_AGG_STATE,
+	RMNET_MAX_AGG_STATE,
+};
+
+struct rmnet_aggregation_state {
+	struct rmnet_egress_agg_params params;
+	struct timespec64 agg_time;
+	struct timespec64 agg_last;
+	struct hrtimer hrtimer;
+	struct work_struct agg_wq;
+	/* Protect aggregation related elements */
+	spinlock_t agg_lock;
+	struct sk_buff *agg_skb;
+	int (*send_agg_skb)(struct sk_buff *skb);
+	int agg_state;
+	u8 agg_count;
+	u8 agg_size_order;
+	struct list_head agg_list;
+	struct rmnet_agg_page *agg_head;
+	struct rmnet_agg_stats *stats;
+};
+
+struct rmnet_agg_page {
+	struct list_head list;
+	struct page *page;
+};
+
+/* One instance of this structure is instantiated for each real_dev associated
+ * with rmnet.
+ */
+struct rmnet_port {
+	struct net_device *dev;
+	u32 data_format;
+	u8 nr_rmnet_devs;
+	u8 rmnet_mode;
+	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+	struct net_device *bridge_ep;
+	void *rmnet_perf;
+
+	struct rmnet_aggregation_state agg_state[RMNET_MAX_AGG_STATE];
+
+	void *qmi_info;
+
+	/* dl marker elements */
+	struct list_head dl_list;
+	struct rmnet_port_priv_stats stats;
+	int dl_marker_flush;
+	/* Pending Byte Marker */
+	struct list_head pb_list;
+	/* Port Config for shs */
+	struct rmnet_shs_clnt_s shs_cfg;
+	struct rmnet_shs_clnt_s phy_shs_cfg;
+
+	/* Descriptor pool */
+	spinlock_t desc_pool_lock;
+	struct rmnet_frag_descriptor_pool *frag_desc_pool;
+};
+
+extern struct rtnl_link_ops rmnet_link_ops;
+
+struct rmnet_vnd_stats {
+	u64 rx_pkts;
+	u64 rx_bytes;
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+	struct rmnet_vnd_stats stats;
+	struct u64_stats_sync syncp;
+};
+
+struct rmnet_coal_close_stats {
+	u64 non_coal;
+	u64 ip_miss;
+	u64 trans_miss;
+	u64 hw_nl;
+	u64 hw_pkt;
+	u64 hw_byte;
+	u64 hw_time;
+	u64 hw_evict;
+	u64 coal;
+};
+
+struct rmnet_coal_stats {
+	u64 coal_rx;
+	u64 coal_pkts;
+	u64 coal_hdr_nlo_err;
+	u64 coal_hdr_pkt_err;
+	u64 coal_csum_err;
+	u64 coal_reconstruct;
+	u64 coal_ip_invalid;
+	u64 coal_trans_invalid;
+	struct rmnet_coal_close_stats close;
+	u64 coal_veid[RMNET_MAX_VEID];
+	u64 coal_tcp;
+	u64 coal_tcp_bytes;
+	u64 coal_udp;
+	u64 coal_udp_bytes;
+};
+
+struct rmnet_priv_stats {
+	u64 csum_ok;
+	u64 csum_valid_unset;
+	u64 csum_validation_failed;
+	u64 csum_err_bad_buffer;
+	u64 csum_err_invalid_ip_version;
+	u64 csum_err_invalid_transport;
+	u64 csum_fragmented_pkt;
+	u64 csum_skipped;
+	u64 csum_sw;
+	u64 csum_hw;
+	struct rmnet_coal_stats coal;
+	u64 ul_prio;
+	u64 tso_pkts;
+	u64 tso_arriv_errs;
+	u64 tso_segment_success;
+	u64 tso_segment_fail;
+	u64 tso_segment_skip;
+	u64 ll_tso_segs;
+	u64 ll_tso_errs;
+	u64 aps_prio;
+};
+
+struct rmnet_priv {
+	u8 mux_id;
+	struct net_device *real_dev;
+	struct rmnet_pcpu_stats __percpu *pcpu_stats;
+	struct gro_cells gro_cells;
+	struct rmnet_priv_stats stats;
+	void __rcu *qos_info;
+	char aps_cb[16];
+};
+
+enum rmnet_dl_marker_prio {
+	RMNET_PERF,
+	RMNET_SHS,
+};
+
+enum rmnet_trace_func {
+	RMNET_MODULE,
+	NW_STACK_MODULE,
+};
+
+enum rmnet_trace_evt {
+	RMNET_DLVR_SKB,
+	RMNET_RCV_FROM_PND,
+	RMNET_TX_UL_PKT,
+	NW_STACK_DEV_Q_XMIT,
+	NW_STACK_NAPI_GRO_FLUSH,
+	NW_STACK_RX,
+	NW_STACK_TX,
+};
+
+int rmnet_is_real_dev_registered(const struct net_device *real_dev);
+struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev,
+		     struct netlink_ext_ack *extack);
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+		     struct net_device *slave_dev);
+#endif /* _RMNET_CONFIG_H_ */

+ 42 - 0
qcom/opensource/datarmnet/core/rmnet_ctl.h

@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * RMNET_CTL header
+ *
+ */
+
+#ifndef _RMNET_CTL_H_
+#define _RMNET_CTL_H_
+
+
+#include <linux/skbuff.h>
+
+enum rmnet_ctl_log_lvl {
+	RMNET_CTL_LOG_CRIT,
+	RMNET_CTL_LOG_ERR,
+	RMNET_CTL_LOG_INFO,
+	RMNET_CTL_LOG_DEBUG,
+};
+
+struct rmnet_ctl_client_hooks {
+	void (*ctl_dl_client_hook)(struct sk_buff *skb);
+};
+
+struct rmnet_ctl_client_if {
+	void *	(*reg)(struct rmnet_ctl_client_hooks *hook);
+	int	(*dereg)(void *handle);
+	int	(*send)(void *handle, struct sk_buff *skb);
+	void	(*log)(enum rmnet_ctl_log_lvl lvl, const char *msg, int rc,
+		       const void *data, unsigned int len);
+};
+
+#ifdef CONFIG_RMNET_LA_PLATFORM
+struct rmnet_ctl_client_if *rmnet_ctl_if(void);
+int rmnet_ctl_get_stats(u64 *s, int n);
+#else
+static inline struct rmnet_ctl_client_if *rmnet_ctl_if(void) { return NULL; }
+static inline int rmnet_ctl_get_stats(u64 *s, int n) { return 0; }
+#endif /* CONFIG_RMNET_LA_PLATFORM */
+
+#endif /* _RMNET_CTL_H_ */

+ 242 - 0
qcom/opensource/datarmnet/core/rmnet_ctl_client.c

@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
+#include <linux/version.h>
+#include "rmnet_ctl.h"
+#include "rmnet_ctl_client.h"
+
+#define RMNET_CTL_LOG_PAGE 10
+#define RMNET_CTL_LOG_NAME "rmnet_ctl"
+#define RMNET_CTL_LOG_LVL  "ipc_log_lvl"
+
+struct rmnet_ctl_client {
+	struct rmnet_ctl_client_hooks hooks;
+};
+
+struct rmnet_ctl_endpoint {
+	struct rmnet_ctl_dev __rcu *dev;
+	struct rmnet_ctl_client __rcu *client;
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_loglvl;
+	void *ipc_log;
+};
+
+#if defined(CONFIG_IPA_DEBUG) || defined(CONFIG_MHI_DEBUG)
+#define CONFIG_RMNET_CTL_DEBUG 1
+#endif
+
+#ifdef CONFIG_RMNET_CTL_DEBUG
+static u8 ipc_log_lvl = RMNET_CTL_LOG_DEBUG;
+#else
+static u8 ipc_log_lvl = RMNET_CTL_LOG_ERR;
+#endif
+
+static DEFINE_SPINLOCK(client_lock);
+static struct rmnet_ctl_endpoint ctl_ep;
+
+void rmnet_ctl_set_dbgfs(bool enable)
+{
+	if (enable) {
+		if (IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
+			ctl_ep.dbgfs_dir = debugfs_create_dir(
+				RMNET_CTL_LOG_NAME, NULL);
+
+		if (!IS_ERR_OR_NULL(ctl_ep.dbgfs_dir))
+			debugfs_create_u8((const char *) RMNET_CTL_LOG_LVL,
+					  (umode_t) 0644,
+					  (struct dentry *) ctl_ep.dbgfs_dir,
+					  (u8 *) &ipc_log_lvl);
+
+		if (!ctl_ep.ipc_log)
+			ctl_ep.ipc_log = ipc_log_context_create(
+				RMNET_CTL_LOG_PAGE, RMNET_CTL_LOG_NAME, 0);
+	} else {
+		debugfs_remove_recursive(ctl_ep.dbgfs_dir);
+		ipc_log_context_destroy(ctl_ep.ipc_log);
+		ctl_ep.dbgfs_dir = NULL;
+		ctl_ep.dbgfs_loglvl = NULL;
+		ctl_ep.ipc_log = NULL;
+	}
+}
+
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
+{
+	rcu_assign_pointer(ctl_ep.dev, dev);
+}
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len)
+{
+	struct rmnet_ctl_client *client;
+	struct sk_buff *skb;
+
+	if (unlikely(!data || !len))
+		return;
+
+	if (len == 0xFFFFFFFF) {
+		skb = (struct sk_buff *)data;
+		rmnet_ctl_log_info("RX", skb->data, skb->len);
+
+		rcu_read_lock();
+
+		client = rcu_dereference(ctl_ep.client);
+		if (client && client->hooks.ctl_dl_client_hook) {
+			skb->protocol = htons(ETH_P_MAP);
+			client->hooks.ctl_dl_client_hook(skb);
+		} else {
+			kfree_skb(skb);
+		}
+
+		rcu_read_unlock();
+	} else {
+		rmnet_ctl_log_info("RX", data, len);
+
+		rcu_read_lock();
+
+		client = rcu_dereference(ctl_ep.client);
+		if (client && client->hooks.ctl_dl_client_hook) {
+			skb = alloc_skb(len, GFP_ATOMIC);
+			if (skb) {
+				skb_put_data(skb, data, len);
+				skb->protocol = htons(ETH_P_MAP);
+				client->hooks.ctl_dl_client_hook(skb);
+			}
+		}
+
+		rcu_read_unlock();
+	}
+}
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
+{
+	struct rmnet_ctl_client *client;
+
+	if (!hook)
+		return NULL;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return NULL;
+	client->hooks = *hook;
+
+	spin_lock(&client_lock);
+
+	/* Only support one client for now */
+	if (rcu_dereference(ctl_ep.client)) {
+		spin_unlock(&client_lock);
+		kfree(client);
+		return NULL;
+	}
+
+	rcu_assign_pointer(ctl_ep.client, client);
+
+	spin_unlock(&client_lock);
+
+	rmnet_ctl_set_dbgfs(true);
+
+	return client;
+}
+EXPORT_SYMBOL(rmnet_ctl_register_client);
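+
+/* Typical client usage, as an illustration only. my_dl_handler and the skb
+ * are hypothetical and supplied by the caller; they are not part of this
+ * driver:
+ *
+ *	static struct rmnet_ctl_client_hooks hooks = {
+ *		.ctl_dl_client_hook = my_dl_handler,
+ *	};
+ *	void *hdl;
+ *
+ *	hdl = rmnet_ctl_register_client(&hooks);
+ *	if (hdl)
+ *		rmnet_ctl_send_client(hdl, skb);
+ *
+ *	rmnet_ctl_unregister_client(hdl);
+ */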
+
+int rmnet_ctl_unregister_client(void *handle)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+
+	spin_lock(&client_lock);
+
+	if (rcu_dereference(ctl_ep.client) != client) {
+		spin_unlock(&client_lock);
+		return -EINVAL;
+	}
+
+	RCU_INIT_POINTER(ctl_ep.client, NULL);
+
+	spin_unlock(&client_lock);
+
+	synchronize_rcu();
+	kfree(client);
+
+	rmnet_ctl_set_dbgfs(false);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_ctl_unregister_client);
+
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+	struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+	struct rmnet_ctl_dev *dev;
+	int rc = -EINVAL;
+
+	if (client != rcu_dereference(ctl_ep.client)) {
+		kfree_skb(skb);
+		return rc;
+	}
+
+	rmnet_ctl_log_info("TX", skb->data, skb->len);
+
+	rcu_read_lock();
+
+	dev = rcu_dereference(ctl_ep.dev);
+	if (dev && dev->xmit)
+		rc = dev->xmit(dev, skb);
+	else
+		kfree_skb(skb);
+
+	rcu_read_unlock();
+
+	if (rc)
+		rmnet_ctl_log_err("TXE", rc, NULL, 0);
+
+	return rc;
+}
+EXPORT_SYMBOL(rmnet_ctl_send_client);
+
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len)
+{
+	if (lvl <= ipc_log_lvl && ctl_ep.ipc_log) {
+		if (data == NULL || len == 0)
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): (null)\n",
+				       msg, rc);
+		else
+			ipc_log_string(ctl_ep.ipc_log, "%3s(%d): %*ph\n",
+				       msg, rc, len > 32 ? 32 : len, data);
+	}
+}
+EXPORT_SYMBOL(rmnet_ctl_log);
+
+static struct rmnet_ctl_client_if client_if = {
+	.reg = rmnet_ctl_register_client,
+	.dereg = rmnet_ctl_unregister_client,
+	.send = rmnet_ctl_send_client,
+	.log = rmnet_ctl_log,
+};
+
+struct rmnet_ctl_client_if *rmnet_ctl_if(void)
+{
+	return &client_if;
+}
+EXPORT_SYMBOL(rmnet_ctl_if);
+
+int rmnet_ctl_get_stats(u64 *s, int n)
+{
+	struct rmnet_ctl_dev *dev;
+
+	rcu_read_lock();
+	dev = rcu_dereference(ctl_ep.dev);
+	if (dev && n > 0) {
+		n = min(n, (int)(sizeof(dev->stats) / sizeof(u64)));
+		memcpy(s, &dev->stats, n * sizeof(u64));
+	}
+	rcu_read_unlock();
+
+	return n;
+}
+EXPORT_SYMBOL(rmnet_ctl_get_stats);

+ 43 - 0
qcom/opensource/datarmnet/core/rmnet_ctl_client.h

@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#ifndef _RMNET_CTL_CLIENT_H_
+#define _RMNET_CTL_CLIENT_H_
+
+#include <linux/skbuff.h>
+#include "rmnet_ctl.h"
+
+void rmnet_ctl_log(enum rmnet_ctl_log_lvl lvl, const char *msg,
+		   int rc, const void *data, unsigned int len);
+
+#define rmnet_ctl_log_err(msg, rc, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_ERR, msg, rc, data, len)
+
+#define rmnet_ctl_log_info(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_INFO, msg, 0, data, len)
+
+#define rmnet_ctl_log_debug(msg, data, len) \
+		rmnet_ctl_log(RMNET_CTL_LOG_DEBUG, msg, 0, data, len)
+
+struct rmnet_ctl_stats {
+	u64 rx_pkts;
+	u64 rx_err;
+	u64 tx_pkts;
+	u64 tx_err;
+	u64 tx_complete;
+};
+
+struct rmnet_ctl_dev {
+	int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
+	struct rmnet_ctl_stats stats;
+};
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len);
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
+void rmnet_ctl_set_dbgfs(bool enable);
+
+#endif /* _RMNET_CTL_CLIENT_H_ */

+ 113 - 0
qcom/opensource/datarmnet/core/rmnet_ctl_ipa.c

@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL IPA handler
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipa.h>
+#include "rmnet_ctl.h"
+#include "rmnet_ctl_client.h"
+
+struct rmnet_ctl_ipa_dev {
+	struct rmnet_ctl_dev dev;
+	spinlock_t rx_lock; /* rx lock */
+	spinlock_t tx_lock; /* tx lock */
+};
+
+static struct rmnet_ctl_ipa_dev ctl_ipa_dev;
+static bool rmnet_ctl_ipa_registered;
+
+static int rmnet_ctl_send_ipa(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
+{
+	struct rmnet_ctl_ipa_dev *ctl_dev = container_of(
+				dev, struct rmnet_ctl_ipa_dev, dev);
+	int rc;
+
+	spin_lock_bh(&ctl_dev->tx_lock);
+
+	rc = ipa_rmnet_ctl_xmit(skb);
+	if (rc)
+		dev->stats.tx_err++;
+	else
+		dev->stats.tx_pkts++;
+
+	spin_unlock_bh(&ctl_dev->tx_lock);
+
+	return rc;
+}
+
+static void rmnet_ctl_dl_callback(void *user_data, void *rx_data)
+{
+	struct rmnet_ctl_ipa_dev *ctl_dev = user_data;
+
+	ctl_dev->dev.stats.rx_pkts++;
+	rmnet_ctl_endpoint_post(rx_data, 0xFFFFFFFF);
+}
+
+static void rmnet_ctl_probe(void *user_data)
+{
+	memset(&ctl_ipa_dev, 0, sizeof(ctl_ipa_dev));
+
+	spin_lock_init(&ctl_ipa_dev.rx_lock);
+	spin_lock_init(&ctl_ipa_dev.tx_lock);
+
+	ctl_ipa_dev.dev.xmit = rmnet_ctl_send_ipa;
+	rmnet_ctl_endpoint_setdev(&ctl_ipa_dev.dev);
+
+	pr_info("rmnet_ctl driver probed\n");
+}
+
+static void rmnet_ctl_remove(void *user_data)
+{
+	rmnet_ctl_endpoint_setdev(NULL);
+
+	pr_info("rmnet_ctl driver removed\n");
+}
+
+static void rmnet_ctl_ipa_ready(void *user_data)
+{
+	int rc;
+
+	rc = ipa_register_rmnet_ctl_cb(
+			rmnet_ctl_probe,
+			&ctl_ipa_dev,
+			rmnet_ctl_remove,
+			&ctl_ipa_dev,
+			rmnet_ctl_dl_callback,
+			&ctl_ipa_dev);
+
+	if (rc)
+		pr_err("%s: %d\n", __func__, rc);
+	else
+		rmnet_ctl_ipa_registered = true;
+}
+
+static int __init rmnet_ctl_init(void)
+{
+	int rc;
+
+	rc = ipa_register_ipa_ready_cb(rmnet_ctl_ipa_ready, NULL);
+	if (rc == -EEXIST)
+		rmnet_ctl_ipa_ready(NULL);
+	else if (rc)
+		pr_err("%s: %d\n", __func__, rc);
+
+	return 0;
+}
+
+static void __exit rmnet_ctl_exit(void)
+{
+	if (rmnet_ctl_ipa_registered) {
+		ipa_unregister_rmnet_ctl_cb();
+		rmnet_ctl_ipa_registered = false;
+	}
+}
+
+module_init(rmnet_ctl_init)
+module_exit(rmnet_ctl_exit)
+
+MODULE_DESCRIPTION("RmNet control IPA Driver");
+MODULE_LICENSE("GPL v2");

+ 222 - 0
qcom/opensource/datarmnet/core/rmnet_ctl_mhi.c

@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL mhi handler
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/mhi.h>
+#include "rmnet_ctl.h"
+#include "rmnet_ctl_client.h"
+
+#define RMNET_CTL_DEFAULT_MRU 256
+
+struct rmnet_ctl_mhi_dev {
+	struct mhi_device *mhi_dev;
+	struct rmnet_ctl_dev dev;
+	u32 mru;
+	spinlock_t rx_lock; /* rx lock */
+	spinlock_t tx_lock; /* tx lock */
+	atomic_t in_reset;
+};
+
+static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
+				dev, struct rmnet_ctl_mhi_dev, dev);
+	int rc;
+
+	spin_lock_bh(&ctl_dev->tx_lock);
+
+	rc = mhi_queue_transfer(ctl_dev->mhi_dev,
+				DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+	if (rc)
+		dev->stats.tx_err++;
+	else
+		dev->stats.tx_pkts++;
+
+	spin_unlock_bh(&ctl_dev->tx_lock);
+
+	return rc;
+}
+
+static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
+				    gfp_t gfp, void *free_buf)
+{
+	struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
+	void *buf;
+	int no_tre, i, rc;
+
+	no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+
+	if (!no_tre && free_buf) {
+		kfree(free_buf);
+		return;
+	}
+
+	for (i = 0; i < no_tre; i++) {
+		if (free_buf) {
+			buf = free_buf;
+			free_buf = NULL;
+		} else {
+			buf = kmalloc(ctl_dev->mru, gfp);
+		}
+
+		if (!buf)
+			return;
+
+		spin_lock_bh(&ctl_dev->rx_lock);
+		rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+					buf, ctl_dev->mru, MHI_EOT);
+		spin_unlock_bh(&ctl_dev->rx_lock);
+
+		if (rc) {
+			kfree(buf);
+			return;
+		}
+	}
+}
+
+static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_res->transaction_status == -ENOTCONN) {
+		kfree(mhi_res->buf_addr);
+		return;
+	} else if (mhi_res->transaction_status ||
+		   !mhi_res->buf_addr || !mhi_res->bytes_xferd) {
+		rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
+		ctl_dev->dev.stats.rx_err++;
+	} else {
+		ctl_dev->dev.stats.rx_pkts++;
+		rmnet_ctl_endpoint_post(mhi_res->buf_addr,
+					mhi_res->bytes_xferd);
+	}
+
+	/* Re-supply receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
+}
+
+static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
+				  struct mhi_result *mhi_res)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+	struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
+
+	if (skb) {
+		if (mhi_res->transaction_status) {
+			rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
+					  skb->data, skb->len);
+			ctl_dev->dev.stats.tx_err++;
+		} else {
+			rmnet_ctl_log_debug("TXC", skb->data, skb->len);
+			ctl_dev->dev.stats.tx_complete++;
+		}
+		kfree_skb(skb);
+	}
+}
+
+static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
+				      enum MHI_CB mhi_cb)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+	if (mhi_cb != MHI_CB_FATAL_ERROR)
+		return;
+
+	atomic_inc(&ctl_dev->in_reset);
+}
+
+static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
+			   const struct mhi_device_id *id)
+{
+	struct rmnet_ctl_mhi_dev *ctl_dev;
+	struct device_node *of_node = mhi_dev->dev.of_node;
+	int rc;
+
+	ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
+	if (!ctl_dev)
+		return -ENOMEM;
+
+	ctl_dev->mhi_dev = mhi_dev;
+	ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
+
+	spin_lock_init(&ctl_dev->rx_lock);
+	spin_lock_init(&ctl_dev->tx_lock);
+	atomic_set(&ctl_dev->in_reset, 0);
+	dev_set_drvdata(&mhi_dev->dev, ctl_dev);
+
+	rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
+	if (rc || !ctl_dev->mru)
+		ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
+
+	rc = mhi_prepare_for_transfer(mhi_dev);
+	if (rc) {
+		pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
+		return -EINVAL;
+	}
+
+	/* Post receive buffers */
+	rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
+
+	rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
+
+	pr_info("rmnet_ctl driver probed\n");
+
+	return 0;
+}
+
+static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
+{
+	rmnet_ctl_endpoint_setdev(NULL);
+	synchronize_rcu();
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+
+	pr_info("rmnet_ctl driver removed\n");
+}
+
+static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
+	{ .chan = "RMNET_CTL" },
+	{}
+};
+
+static struct mhi_driver rmnet_ctl_driver = {
+	.probe = rmnet_ctl_probe,
+	.remove = rmnet_ctl_remove,
+	.dl_xfer_cb = rmnet_ctl_dl_callback,
+	.ul_xfer_cb = rmnet_ctl_ul_callback,
+	.status_cb = rmnet_ctl_status_callback,
+	.id_table = rmnet_ctl_mhi_match,
+	.driver = {
+		.name = "rmnet_ctl",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init rmnet_ctl_init(void)
+{
+	int rc;
+
+	rc = mhi_driver_register(&rmnet_ctl_driver);
+	rmnet_ctl_set_dbgfs(true);
+
+	return rc;
+}
+
+static void __exit rmnet_ctl_exit(void)
+{
+	mhi_driver_unregister(&rmnet_ctl_driver);
+	rmnet_ctl_set_dbgfs(false);
+}
+
+module_init(rmnet_ctl_init)
+module_exit(rmnet_ctl_exit)
+
+MODULE_DESCRIPTION("RmNet Control MHI Driver");
+MODULE_LICENSE("GPL v2");

+ 2061 - 0
qcom/opensource/datarmnet/core/rmnet_descriptor.c

@@ -0,0 +1,2061 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inet.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "rmnet_config.h"
+#include "rmnet_descriptor.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+#include "rmnet_qmi.h"
+#include "rmnet_trace.h"
+#include "qmi_rmnet.h"
+
+#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
+#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+#define RMNET_PB_IND_HDR_SIZE (sizeof(struct rmnet_map_pb_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+#define rmnet_descriptor_for_each_frag(p, desc) \
+	list_for_each_entry(p, &desc->frags, list)
+#define rmnet_descriptor_for_each_frag_safe(p, tmp, desc) \
+	list_for_each_entry_safe(p, tmp, &desc->frags, list)
+#define rmnet_descriptor_for_each_frag_safe_reverse(p, tmp, desc) \
+	list_for_each_entry_safe_reverse(p, tmp, &desc->frags, list)
+
+typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port);
+typedef void (*rmnet_perf_chain_hook_t)(void);
+
+typedef void (*rmnet_perf_tether_ingress_hook_t)(struct tcphdr *tp, struct sk_buff *skb);
+rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress_hook __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_tether_ingress_hook);
+
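+/* Fetch a descriptor from the port's free pool, or atomically allocate a new
+ * one if the pool is empty. Returns NULL only if that allocation fails.
+ */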
+struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct rmnet_frag_descriptor *frag_desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&port->desc_pool_lock, flags);
+	if (!list_empty(&pool->free_list)) {
+		frag_desc = list_first_entry(&pool->free_list,
+					     struct rmnet_frag_descriptor,
+					     list);
+		list_del_init(&frag_desc->list);
+	} else {
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			goto out;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->frags);
+		pool->pool_size++;
+	}
+
+out:
+	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
+	return frag_desc;
+}
+EXPORT_SYMBOL(rmnet_get_frag_descriptor);
+
+void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+				   struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct rmnet_fragment *frag, *tmp;
+	unsigned long flags;
+
+	list_del(&frag_desc->list);
+
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
+		struct page *page = skb_frag_page(&frag->frag);
+
+		if (page)
+			put_page(page);
+
+		list_del(&frag->list);
+		kfree(frag);
+	}
+
+	memset(frag_desc, 0, sizeof(*frag_desc));
+	INIT_LIST_HEAD(&frag_desc->list);
+	INIT_LIST_HEAD(&frag_desc->frags);
+	spin_lock_irqsave(&port->desc_pool_lock, flags);
+	list_add_tail(&frag_desc->list, &pool->free_list);
+	spin_unlock_irqrestore(&port->desc_pool_lock, flags);
+}
+EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);
+
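+/* Remove 'size' bytes from the front of the descriptor, freeing any fragments
+ * that are fully consumed. If the pull would consume the whole packet, the
+ * descriptor is dropped and recycled. Returns the new start of the data.
+ */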
+void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
+		      struct rmnet_port *port, unsigned int size)
+{
+	struct rmnet_fragment *frag, *tmp;
+
+	if (size >= frag_desc->len) {
+		pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
+			__func__, size, frag_desc->len);
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		return NULL;
+	}
+
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
+		u32 frag_size = skb_frag_size(&frag->frag);
+
+		if (!size)
+			break;
+
+		if (size >= frag_size) {
+			/* Remove the whole frag */
+			struct page *page = skb_frag_page(&frag->frag);
+
+			if (page)
+				put_page(page);
+
+			list_del(&frag->list);
+			size -= frag_size;
+			frag_desc->len -= frag_size;
+			kfree(frag);
+			continue;
+		}
+
+		/* Pull off 'size' bytes */
+		skb_frag_off_add(&frag->frag, size);
+		skb_frag_size_sub(&frag->frag, size);
+		frag_desc->len -= size;
+		break;
+	}
+
+	return rmnet_frag_data_ptr(frag_desc);
+}
+EXPORT_SYMBOL(rmnet_frag_pull);
+
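+/* Shrink the descriptor to 'size' bytes by trimming fragments from the tail.
+ * Trimming to zero drops and recycles the descriptor instead.
+ */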
+void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
+		      struct rmnet_port *port, unsigned int size)
+{
+	struct rmnet_fragment *frag, *tmp;
+	unsigned int eat;
+
+	if (!size) {
+		pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
+			__func__, frag_desc->len);
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		return NULL;
+	}
+
+	/* Growing bigger doesn't make sense */
+	if (size >= frag_desc->len)
+		goto out;
+
+	/* Compute number of bytes to remove from the end */
+	eat = frag_desc->len - size;
+	rmnet_descriptor_for_each_frag_safe_reverse(frag, tmp, frag_desc) {
+		u32 frag_size = skb_frag_size(&frag->frag);
+
+		if (!eat)
+			goto out;
+
+		if (eat >= frag_size) {
+			/* Remove the whole frag */
+			struct page *page = skb_frag_page(&frag->frag);
+
+			if (page)
+				put_page(page);
+
+			list_del(&frag->list);
+			eat -= frag_size;
+			frag_desc->len -= frag_size;
+			kfree(frag);
+			continue;
+		}
+
+		/* Chop off 'eat' bytes from the end */
+		skb_frag_size_sub(&frag->frag, eat);
+		frag_desc->len -= eat;
+		goto out;
+	}
+
+out:
+	return rmnet_frag_data_ptr(frag_desc);
+}
+EXPORT_SYMBOL(rmnet_frag_trim);
+
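+/* Linearize 'len' bytes starting at offset 'off' into the caller's buffer by
+ * walking the fragment list.
+ */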
+static int rmnet_frag_copy_data(struct rmnet_frag_descriptor *frag_desc,
+				u32 off, u32 len, void *buf)
+{
+	struct rmnet_fragment *frag;
+	u32 frag_size, copy_len;
+	u32 buf_offset = 0;
+
+	/* Don't make me do something we'd both regret */
+	if (off > frag_desc->len || len > frag_desc->len ||
+	    off + len > frag_desc->len)
+		return -EINVAL;
+
+	/* Copy 'len' bytes into the buffer starting from 'off' */
+	rmnet_descriptor_for_each_frag(frag, frag_desc) {
+		if (!len)
+			break;
+
+		frag_size = skb_frag_size(&frag->frag);
+		if (off < frag_size) {
+			copy_len = min_t(u32, len, frag_size - off);
+			memcpy(buf + buf_offset,
+			       skb_frag_address(&frag->frag) + off,
+			       copy_len);
+			buf_offset += copy_len;
+			len -= copy_len;
+			off = 0;
+		} else {
+			off -= frag_size;
+		}
+	}
+
+	return 0;
+}
+
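+/* Return a pointer to 'len' bytes of header data at offset 'off'. If the
+ * header lies within a single fragment, a pointer into that page is returned
+ * directly; otherwise the bytes are copied into 'buf' and 'buf' is returned.
+ */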
+void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
+			    u32 len, void *buf)
+{
+	struct rmnet_fragment *frag;
+	u8 *start;
+	u32 frag_size, offset;
+
+	/* Don't take a long pointer off a short frag */
+	if (off > frag_desc->len || len > frag_desc->len ||
+	    off + len > frag_desc->len)
+		return NULL;
+
+	/* Find the starting fragment */
+	offset = off;
+	rmnet_descriptor_for_each_frag(frag, frag_desc) {
+		frag_size = skb_frag_size(&frag->frag);
+		if (off < frag_size) {
+			start = skb_frag_address(&frag->frag) + off;
+			/* If the header is entirely on this frag, just return
+			 * a pointer to it.
+			 */
+			if (off + len <= frag_size)
+				return start;
+
+			/* Otherwise, we need to copy the data into a linear
+			 * buffer.
+			 */
+			break;
+		}
+
+		off -= frag_size;
+	}
+
+	if (rmnet_frag_copy_data(frag_desc, offset, len, buf) < 0)
+		return NULL;
+
+	return buf;
+}
+EXPORT_SYMBOL(rmnet_frag_header_ptr);
+
+int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
+				   struct page *p, u32 page_offset, u32 len)
+{
+	struct rmnet_fragment *frag;
+
+	frag = kzalloc(sizeof(*frag), GFP_ATOMIC);
+	if (!frag)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&frag->list);
+	get_page(p);
+	__skb_frag_set_page(&frag->frag, p);
+	skb_frag_size_set(&frag->frag, len);
+	skb_frag_off_set(&frag->frag, page_offset);
+	list_add_tail(&frag->list, &frag_desc->frags);
+	frag_desc->len += len;
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_frag_descriptor_add_frag);
+
+int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
+					 struct rmnet_frag_descriptor *from,
+					 u32 off, u32 len)
+{
+	struct rmnet_fragment *frag;
+	int rc;
+
+	/* Sanity check the lengths */
+	if (off > from->len || len > from->len || off + len > from->len)
+		return -EINVAL;
+
+	rmnet_descriptor_for_each_frag(frag, from) {
+		u32 frag_size;
+
+		if (!len)
+			break;
+
+		frag_size = skb_frag_size(&frag->frag);
+		if (off < frag_size) {
+			struct page *p = skb_frag_page(&frag->frag);
+			u32 page_off = skb_frag_off(&frag->frag);
+			u32 copy_len = min_t(u32, len, frag_size - off);
+
+			rc = rmnet_frag_descriptor_add_frag(to, p,
+							    page_off + off,
+							    copy_len);
+			if (rc < 0)
+				return rc;
+
+			len -= copy_len;
+			off = 0;
+		} else {
+			off -= frag_size;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_frag_descriptor_add_frags_from);
+
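+/* Fragment-descriptor analogue of ipv6_skip_exthdr(): walk the IPv6 extension
+ * header chain beginning at 'start', updating *nexthdrp and reporting the
+ * fragment offset and whether a fragment header was seen. Returns the offset
+ * of the header following the chain, or a negative error.
+ */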
+int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
+				int start, u8 *nexthdrp, __be16 *frag_offp,
+				bool *frag_hdrp)
+{
+	u8 nexthdr = *nexthdrp;
+
+	*frag_offp = 0;
+	*frag_hdrp = false;
+	while (ipv6_ext_hdr(nexthdr)) {
+		struct ipv6_opt_hdr *hp, __hp;
+		int hdrlen;
+
+		if (nexthdr == NEXTHDR_NONE)
+			return -EINVAL;
+
+		hp = rmnet_frag_header_ptr(frag_desc, (u32)start, sizeof(*hp),
+					   &__hp);
+		if (!hp)
+			return -EINVAL;
+
+		if (nexthdr == NEXTHDR_FRAGMENT) {
+			u32 off = offsetof(struct frag_hdr, frag_off);
+			__be16 *fp, __fp;
+
+			fp = rmnet_frag_header_ptr(frag_desc, (u32)start + off,
+						   sizeof(*fp), &__fp);
+			if (!fp)
+				return -EINVAL;
+
+			*frag_offp = *fp;
+			*frag_hdrp = true;
+			if (ntohs(*frag_offp) & ~0x7)
+				break;
+			hdrlen = 8;
+		} else if (nexthdr == NEXTHDR_AUTH) {
+			hdrlen = (hp->hdrlen + 2) << 2;
+		} else {
+			hdrlen = ipv6_optlen(hp);
+		}
+
+		nexthdr = hp->nexthdr;
+		start += hdrlen;
+	}
+
+	*nexthdrp = nexthdr;
+	return start;
+}
+EXPORT_SYMBOL(rmnet_frag_ipv6_skip_exthdr);
+
+static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
+				     struct rmnet_map_control_command *cmd,
+				     struct rmnet_port *port,
+				     int enable)
+{
+	struct rmnet_endpoint *ep;
+	struct net_device *vnd;
+	u16 ip_family;
+	u16 fc_seq;
+	u32 qos_id;
+	u8 mux_id;
+	int r;
+
+	mux_id = qmap->mux_id;
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		return RX_HANDLER_CONSUMED;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		return RX_HANDLER_CONSUMED;
+
+	vnd = ep->egress_dev;
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	/* Ignore the IP family and pass the sequence number for both the v4
+	 * and v6 sequences. User space does not support creating dedicated
+	 * flows for the two protocols.
+	 */
+	r = rmnet_vnd_do_flow_control(vnd, enable);
+	if (r)
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	else
+		return RMNET_MAP_COMMAND_ACK;
+}
+
+static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
+				unsigned char type,
+				struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	struct net_device *dev = port->dev;
+	struct sk_buff *skb;
+	u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);
+
+	skb = alloc_skb(alloc_len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = dev;
+
+	cmd = rmnet_map_get_cmd_start(skb);
+	cmd->cmd_type = type & 0x03;
+
+	netif_tx_lock(dev);
+	dev->netdev_ops->ndo_start_xmit(skb, dev);
+	netif_tx_unlock(dev);
+}
+
+static void
+rmnet_frag_process_pb_ind(struct rmnet_frag_descriptor *frag_desc,
+			  struct rmnet_map_control_command_header *cmd,
+			  struct rmnet_port *port,
+			  u16 cmd_len)
+{
+	struct rmnet_map_pb_ind_hdr *pbhdr, __pbhdr;
+	u32 offset = sizeof(struct rmnet_map_header);
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (cmd_len + offset < RMNET_PB_IND_HDR_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	pbhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
+				      sizeof(*pbhdr), &__pbhdr);
+	if (!pbhdr)
+		return;
+
+	port->stats.pb_marker_count++;
+
+	/* If a target is taking frag path, we can assume DL marker v2 is in
+	 * play
+	 */
+	if (is_dl_mark_v2)
+		rmnet_map_pb_ind_notify(port, pbhdr);
+}
+
+static void
+rmnet_frag_process_flow_start(struct rmnet_frag_descriptor *frag_desc,
+			      struct rmnet_map_control_command_header *cmd,
+			      struct rmnet_port *port,
+			      u16 cmd_len)
+{
+	struct rmnet_map_dl_ind_hdr *dlhdr, __dlhdr;
+	u32 offset = sizeof(struct rmnet_map_header);
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (cmd_len + offset < RMNET_DL_IND_HDR_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	dlhdr = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
+				      sizeof(*dlhdr), &__dlhdr);
+	if (!dlhdr)
+		return;
+
+	port->stats.dl_hdr_last_ep_id = cmd->source_id;
+	port->stats.dl_hdr_last_qmap_vers = cmd->reserved;
+	port->stats.dl_hdr_last_trans_id = cmd->transaction_id;
+	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
+	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
+	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
+	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
+	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
+	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
+	port->stats.dl_hdr_count++;
+
+	/* If a target is taking frag path, we can assume DL marker v2 is in
+	 * play
+	 */
+	if (is_dl_mark_v2)
+		rmnet_map_dl_hdr_notify_v2(port, dlhdr, cmd);
+}
+
+static void
+rmnet_frag_process_flow_end(struct rmnet_frag_descriptor *frag_desc,
+			    struct rmnet_map_control_command_header *cmd,
+			    struct rmnet_port *port, u16 cmd_len)
+{
+	struct rmnet_map_dl_ind_trl *dltrl, __dltrl;
+	u32 offset = sizeof(struct rmnet_map_header);
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (cmd_len + offset < RMNET_DL_IND_TRL_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	dltrl = rmnet_frag_header_ptr(frag_desc, offset + sizeof(*cmd),
+				      sizeof(*dltrl), &__dltrl);
+	if (!dltrl)
+		return;
+
+	port->stats.dl_trl_last_seq = dltrl->seq_le;
+	port->stats.dl_trl_count++;
+
+	/* If a target is taking frag path, we can assume DL marker v2 is in
+	 * play
+	 */
+	if (is_dl_mark_v2)
+		rmnet_map_dl_trl_notify_v2(port, dltrl, cmd);
+}
+
+/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ */
+void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_map_header *qmap, struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd, __cmd;
+	unsigned char rc = 0;
+
+	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(*qmap), sizeof(*cmd),
+				    &__cmd);
+	if (!cmd)
+		return;
+
+	switch (cmd->command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_frag_do_flow_control(qmap, cmd, port, 0);
+		break;
+
+	default:
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		break;
+	}
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_frag_send_ack(qmap, rc, port);
+}
+
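+/* Dispatch QMAPv5 marker commands (DL flow start/end and PB bytes
+ * indications). Returns 0 if the command was handled, 1 if it is unknown,
+ * and a negative error if the command header cannot be read.
+ */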
+int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
+			    struct rmnet_port *port, u16 pkt_len)
+{
+	struct rmnet_map_control_command_header *cmd, __cmd;
+
+	cmd = rmnet_frag_header_ptr(frag_desc, sizeof(struct rmnet_map_header),
+				    sizeof(*cmd), &__cmd);
+	if (!cmd)
+		return -1;
+
+	/* Silently discard any markers received over the LL channel */
+	if (frag_desc->priority == 0xda1a &&
+	    (cmd->command_name == RMNET_MAP_COMMAND_FLOW_START ||
+	     cmd->command_name == RMNET_MAP_COMMAND_FLOW_END))
+		return 0;
+
+	switch (cmd->command_name) {
+	case RMNET_MAP_COMMAND_FLOW_START:
+		rmnet_frag_process_flow_start(frag_desc, cmd, port, pkt_len);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_END:
+		rmnet_frag_process_flow_end(frag_desc, cmd, port, pkt_len);
+		break;
+
+	case RMNET_MAP_COMMAND_PB_BYTES:
+		rmnet_frag_process_pb_ind(frag_desc, cmd, port, pkt_len);
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_frag_flow_command);
+
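+/* Carve a single MAP packet out of the skb's page fragments, starting at byte
+ * offset 'start', and append it to 'list' as a new frag descriptor. Returns
+ * the number of bytes claimed by the descriptor, or -1 on error.
+ */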
+static int rmnet_frag_deaggregate_one(struct sk_buff *skb,
+				      struct rmnet_port *port,
+				      struct list_head *list,
+				      u32 start, u32 priority)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	struct rmnet_frag_descriptor *frag_desc;
+	struct rmnet_map_header *maph, __maph;
+	skb_frag_t *frag;
+	u32 start_frag, offset, i;
+	u32 start_frag_size, start_frag_off;
+	u32 pkt_len, copy_len = 0;
+	int rc;
+
+	for (start_frag = 0, offset = 0; start_frag < shinfo->nr_frags;
+	     start_frag++) {
+		frag = &shinfo->frags[start_frag];
+		if (start < skb_frag_size(frag) + offset)
+			break;
+
+		offset += skb_frag_size(frag);
+	}
+
+	if (start_frag == shinfo->nr_frags)
+		return -1;
+
+	/* start - offset is the additional offset into the page to account
+	 * for any data on it we've already used.
+	 */
+	start_frag_size = skb_frag_size(frag) - (start - offset);
+	start_frag_off = skb_frag_off(frag) + (start - offset);
+
+	/* Grab the QMAP header. Careful, as there's no guarantee that it's
+	 * contiguous!
+	 */
+	if (likely(start_frag_size >= sizeof(*maph))) {
+		maph = skb_frag_address(frag) + (start - offset);
+	} else {
+		/* The header's split across pages. We can rebuild it.
+		 * Probably not faster or stronger than before. But certainly
+		 * more linear.
+		 */
+		if (skb_copy_bits(skb, start, &__maph, sizeof(__maph)) < 0)
+			return -1;
+
+		maph = &__maph;
+	}
+
+	pkt_len = ntohs(maph->pkt_len);
+	/* Catch empty frames */
+	if (!pkt_len)
+		return -1;
+
+	frag_desc = rmnet_get_frag_descriptor(port);
+	if (!frag_desc)
+		return -1;
+
+	frag_desc->priority = priority;
+	pkt_len += sizeof(*maph);
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+		pkt_len += sizeof(struct rmnet_map_dl_csum_trailer);
+	} else if ((port->data_format & (RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5 |
+					 RMNET_FLAGS_INGRESS_COALESCE)) &&
+		   !maph->cd_bit) {
+		u32 hsize = 0;
+		u8 type;
+
+		/* Check the type. This seems like it should be overkill for
+		 * less than a single byte, doesn't it?
+		 */
+		if (likely(start_frag_size >= sizeof(*maph) + 1)) {
+			type = *((u8 *)maph + sizeof(*maph));
+		} else {
+			if (skb_copy_bits(skb, start + sizeof(*maph), &type,
+					  sizeof(type)) < 0)
+				return -1;
+		}
+
+		/* Type only uses the first 7 bits */
+		switch ((type & 0xFE) >> 1) {
+		case RMNET_MAP_HEADER_TYPE_COALESCING:
+			hsize = sizeof(struct rmnet_map_v5_coal_header);
+			break;
+		case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+			hsize = sizeof(struct rmnet_map_v5_csum_header);
+			break;
+		}
+
+		pkt_len += hsize;
+	}
+
+	/* Add all frags containing the packet data to the descriptor */
+	for (i = start_frag; pkt_len > 0 && i < shinfo->nr_frags; ) {
+		u32 size, off;
+		u32 copy;
+
+		frag = &shinfo->frags[i];
+		size = skb_frag_size(frag);
+		off = skb_frag_off(frag);
+		if (i == start_frag) {
+			/* These are different for the first one to account for
+			 * the starting offset.
+			 */
+			size = start_frag_size;
+			off = start_frag_off;
+		}
+
+		copy = min_t(u32, size, pkt_len);
+		rc = rmnet_frag_descriptor_add_frag(frag_desc,
+						    skb_frag_page(frag), off,
+						    copy);
+		if (rc < 0) {
+			rmnet_recycle_frag_descriptor(frag_desc, port);
+			return -1;
+		}
+
+		pkt_len -= copy;
+		copy_len += copy;
+		/* If the fragment is exhausted, we can move to the next one */
+		if (!(size - copy_len)) {
+			i++;
+			copy_len = 0;
+		}
+	}
+
+	if (pkt_len) {
+		/* Packet length is larger than the amount of data we have */
+		rmnet_recycle_frag_descriptor(frag_desc, port);
+		return -1;
+	}
+
+	list_add_tail(&frag_desc->list, list);
+	return (int)frag_desc->len;
+}
+
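+/* Split an aggregated MAP skb into individual frag descriptors, appending
+ * each one to 'list'. Deaggregation stops at the first malformed packet.
+ */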
+void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
+			    struct list_head *list, u32 priority)
+{
+	u32 start = 0;
+	int rc;
+
+	while (start < skb->len) {
+		rc = rmnet_frag_deaggregate_one(skb, port, list, start,
+						priority);
+		if (rc < 0)
+			return;
+
+		start += (u32)rc;
+	}
+}
+
+/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
+ * if needed (i.e. forwarding, UDP GRO)
+ */
+static void rmnet_frag_gso_stamp(struct sk_buff *skb,
+				 struct rmnet_frag_descriptor *frag_desc)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (frag_desc->trans_proto == IPPROTO_TCP)
+		shinfo->gso_type = (frag_desc->ip_proto == 4) ?
+				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	else
+		shinfo->gso_type = SKB_GSO_UDP_L4;
+
+	shinfo->gso_size = frag_desc->gso_size;
+	shinfo->gso_segs = frag_desc->gso_segs;
+}
+
+/* Set the partial checksum information. Sets the transport checksum to the
+ * pseudoheader checksum and sets the offload metadata.
+ */
+static void rmnet_frag_partial_csum(struct sk_buff *skb,
+				    struct rmnet_frag_descriptor *frag_desc)
+{
+	rmnet_perf_tether_ingress_hook_t rmnet_perf_tether_ingress;
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	__sum16 pseudo;
+	u16 pkt_len = skb->len - frag_desc->ip_len;
+
+	if (frag_desc->ip_proto == 4) {
+		iph->tot_len = htons(skb->len);
+		iph->check = 0;
+		iph->check = ip_fast_csum(iph, iph->ihl);
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    pkt_len, frag_desc->trans_proto,
+					    0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		/* Payload length includes any extension headers */
+		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  pkt_len, frag_desc->trans_proto, 0);
+	}
+
+	if (frag_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)
+				    ((u8 *)iph + frag_desc->ip_len);
+
+		tp->check = pseudo;
+		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		rmnet_perf_tether_ingress = rcu_dereference(rmnet_perf_tether_ingress_hook);
+		if (rmnet_perf_tether_ingress)
+			rmnet_perf_tether_ingress(tp, skb);
+	} else {
+		struct udphdr *up = (struct udphdr *)
+				    ((u8 *)iph + frag_desc->ip_len);
+
+		up->len = htons(pkt_len);
+		up->check = pseudo;
+		skb->csum_offset = offsetof(struct udphdr, check);
+	}
+
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
+}
+
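+/* Record the page frame numbers backing the skb (and any frag_list skbs) for
+ * the print_pfn tracepoint.
+ */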
+#define PFN_ENTRY_MAX (128)
+#define PFNI (count++ % PFN_ENTRY_MAX)
+static void rmnet_descriptor_trace_pfn(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+	struct sk_buff *frag_iter;
+	unsigned long rpfn[PFN_ENTRY_MAX];
+	int i, count;
+
+	if (!trace_print_pfn_enabled())
+		return;
+
+	shinfo = skb_shinfo(skb);
+	memset(rpfn, 0, sizeof(rpfn));
+	count = 0;
+
+	for (i = 0; i < shinfo->nr_frags; i++)
+		rpfn[PFNI] = page_to_pfn(skb_frag_page(&shinfo->frags[i]));
+
+	skb_walk_frags(skb, frag_iter) {
+		shinfo = skb_shinfo(frag_iter);
+
+		for (i = 0; i < shinfo->nr_frags; i++)
+			rpfn[PFNI] = page_to_pfn(skb_frag_page(&shinfo->frags[i]));
+	}
+
+	trace_print_pfn(skb, rpfn, count);
+}
+
+/* Allocate and populate an skb to contain the packet represented by the
+ * frag descriptor.
+ */
+static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port)
+{
+	struct sk_buff *head_skb, *current_skb, *skb;
+	struct skb_shared_info *shinfo;
+	struct rmnet_fragment *frag, *tmp;
+	struct rmnet_skb_cb *cb;
+
+	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
+	if (frag_desc->hdrs_valid) {
+		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
+
+		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
+				     GFP_ATOMIC);
+		if (!head_skb)
+			return NULL;
+
+		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
+		rmnet_frag_copy_data(frag_desc, 0, hdr_len,
+				     skb_put(head_skb, hdr_len));
+		skb_reset_network_header(head_skb);
+		if (frag_desc->trans_len)
+			skb_set_transport_header(head_skb, frag_desc->ip_len);
+
+		/* Pull the headers off carefully */
+		if (hdr_len == frag_desc->len)
+			/* Fast forward "header only" packets */
+			goto skip_frags;
+
+		if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
+			kfree_skb(head_skb);
+			return NULL;
+		}
+	} else {
+		/* Allocate enough space to avoid penalties in the stack
+		 * from __pskb_pull_tail()
+		 */
+		head_skb = alloc_skb(256 + RMNET_MAP_DEAGGR_HEADROOM,
+				     GFP_ATOMIC);
+		if (!head_skb)
+			return NULL;
+
+		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
+	}
+
+	shinfo = skb_shinfo(head_skb);
+	current_skb = head_skb;
+
+	/* Add in the page fragments */
+	rmnet_descriptor_for_each_frag_safe(frag, tmp, frag_desc) {
+		struct page *p = skb_frag_page(&frag->frag);
+		u32 frag_size = skb_frag_size(&frag->frag);
+
+add_frag:
+		if (shinfo->nr_frags < MAX_SKB_FRAGS) {
+			get_page(p);
+			skb_add_rx_frag(current_skb, shinfo->nr_frags, p,
+					skb_frag_off(&frag->frag), frag_size,
+					frag_size);
+			if (current_skb != head_skb) {
+				head_skb->len += frag_size;
+				head_skb->data_len += frag_size;
+			}
+		} else {
+			/* Alloc a new skb and try again */
+			skb = alloc_skb(0, GFP_ATOMIC);
+			if (!skb)
+				break;
+
+			if (current_skb == head_skb)
+				shinfo->frag_list = skb;
+			else
+				current_skb->next = skb;
+
+			current_skb = skb;
+			shinfo = skb_shinfo(current_skb);
+			goto add_frag;
+		}
+	}
+
+skip_frags:
+	head_skb->dev = frag_desc->dev;
+	rmnet_set_skb_proto(head_skb);
+	cb = RMNET_SKB_CB(head_skb);
+	cb->coal_bytes = frag_desc->coal_bytes;
+	cb->coal_bufsize = frag_desc->coal_bufsize;
+
+	/* Handle any header metadata that needs to be updated after RSB/RSC
+	 * segmentation
+	 */
+	if (frag_desc->ip_id_set) {
+		struct iphdr *iph;
+
+		iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
+		csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
+		iph->id = frag_desc->ip_id;
+	}
+
+	if (frag_desc->tcp_seq_set) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)
+		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
+		th->seq = frag_desc->tcp_seq;
+	}
+
+	if (frag_desc->tcp_flags_set) {
+		struct tcphdr *th;
+		__be16 *flags;
+
+		th = (struct tcphdr *)
+		     (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
+		flags = (__be16 *)&tcp_flag_word(th);
+		*flags = frag_desc->tcp_flags;
+	}
+
+	/* Handle csum offloading */
+	if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
+		/* Set the partial checksum information */
+		rmnet_frag_partial_csum(head_skb, frag_desc);
+	} else if (frag_desc->csum_valid) {
+		/* Non-RSB/RSC/perf packet. The current checksum is fine */
+		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (frag_desc->hdrs_valid &&
+		   (frag_desc->trans_proto == IPPROTO_TCP ||
+		    frag_desc->trans_proto == IPPROTO_UDP)) {
+		/* Unfortunately, we have to fake a bad checksum here, since
+		 * the original bad value is lost by the hardware. The only
+		 * reliable way to do it is to calculate the actual checksum
+		 * and corrupt it.
+		 */
+		__sum16 *check;
+		__wsum csum;
+		unsigned int offset = skb_transport_offset(head_skb);
+		__sum16 pseudo;
+
+		/* Calculate pseudo header and update header fields */
+		if (frag_desc->ip_proto == 4) {
+			struct iphdr *iph = ip_hdr(head_skb);
+			__be16 tot_len = htons(head_skb->len);
+
+			csum_replace2(&iph->check, iph->tot_len, tot_len);
+			iph->tot_len = tot_len;
+			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						    head_skb->len -
+						    frag_desc->ip_len,
+						    frag_desc->trans_proto, 0);
+		} else {
+			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);
+
+			ip6h->payload_len = htons(head_skb->len -
+						  sizeof(*ip6h));
+			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+						  head_skb->len -
+						  frag_desc->ip_len,
+						  frag_desc->trans_proto, 0);
+		}
+
+		if (frag_desc->trans_proto == IPPROTO_TCP) {
+			check = &tcp_hdr(head_skb)->check;
+		} else {
+			udp_hdr(head_skb)->len = htons(head_skb->len -
+						       frag_desc->ip_len);
+			check = &udp_hdr(head_skb)->check;
+		}
+
+		*check = pseudo;
+		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
+				    0);
+		/* Add 1 to corrupt. This cannot produce a final value of 0
+		 * since csum_fold() can't return a value of 0xFFFF
+		 */
+		*check = csum16_add(csum_fold(csum), htons(1));
+		head_skb->ip_summed = CHECKSUM_NONE;
+	}
+
+	/* Handle any rmnet_perf metadata */
+	if (frag_desc->hash) {
+		head_skb->hash = frag_desc->hash;
+		head_skb->sw_hash = 1;
+	}
+
+	if (frag_desc->flush_shs)
+		cb->flush_shs = 1;
+
+	/* Handle coalesced packets */
+	if (frag_desc->gso_segs > 1)
+		rmnet_frag_gso_stamp(head_skb, frag_desc);
+
+	/* Propagate original priority value */
+	head_skb->priority = frag_desc->priority;
+
+	if (trace_print_tcp_rx_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+
+		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
+			goto skip_trace_print_tcp_rx;
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (head_skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(head_skb)->protocol != IPPROTO_TCP)
+				goto skip_trace_print_tcp_rx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(head_skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(head_skb)->daddr);
+		}
+
+		if (head_skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_TCP)
+				goto skip_trace_print_tcp_rx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(head_skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(head_skb)->daddr);
+		}
+
+		trace_print_tcp_rx(head_skb, saddr, daddr, tcp_hdr(head_skb));
+
+		rmnet_descriptor_trace_pfn(head_skb);
+	}
+skip_trace_print_tcp_rx:
+
+	if (trace_print_udp_rx_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+		u16 ip_id = 0;
+
+		if (!frag_desc->hdrs_valid && !frag_desc->trans_len)
+			goto skip_trace_print_udp_rx;
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (head_skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(head_skb)->protocol != IPPROTO_UDP)
+				goto skip_trace_print_udp_rx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(head_skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(head_skb)->daddr);
+			ip_id = ntohs(ip_hdr(head_skb)->id);
+		}
+
+		if (head_skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(head_skb)->nexthdr != IPPROTO_UDP)
+				goto skip_trace_print_udp_rx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(head_skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(head_skb)->daddr);
+		}
+
+		trace_print_udp_rx(head_skb, saddr, daddr, udp_hdr(head_skb), ip_id);
+
+		rmnet_descriptor_trace_pfn(head_skb);
+	}
+skip_trace_print_udp_rx:
+
+	return head_skb;
+}
+
+/* Deliver the packets contained within a frag descriptor */
+void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_port *port)
+{
+	struct sk_buff *skb;
+
+	skb = rmnet_alloc_skb(frag_desc, port);
+	if (skb)
+		rmnet_deliver_skb(skb, port);
+	rmnet_recycle_frag_descriptor(frag_desc, port);
+}
+EXPORT_SYMBOL(rmnet_frag_deliver);
+
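+/* Carve the next gso_size * gso_segs bytes of payload out of a coalesced
+ * descriptor into a new descriptor that reuses the original headers, fixing
+ * up the TCP sequence number/flags and the IPv4 ID as needed, then append it
+ * to 'list'.
+ */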
+static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
+				      struct rmnet_port *port,
+				      struct list_head *list, u8 pkt_id,
+				      bool csum_valid)
+{
+	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
+	struct rmnet_frag_descriptor *new_desc;
+	u32 dlen = coal_desc->gso_size * coal_desc->gso_segs;
+	u32 hlen = coal_desc->ip_len + coal_desc->trans_len;
+	u32 offset = hlen + coal_desc->data_offset;
+	int rc;
+
+	new_desc = rmnet_get_frag_descriptor(port);
+	if (!new_desc)
+		return;
+
+	/* Header information and most metadata is the same as the original */
+	memcpy(new_desc, coal_desc, sizeof(*coal_desc));
+	INIT_LIST_HEAD(&new_desc->list);
+	INIT_LIST_HEAD(&new_desc->frags);
+	new_desc->len = 0;
+
+	/* Add the header fragments */
+	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, 0,
+						  hlen);
+	if (rc < 0)
+		goto recycle;
+
+	/* Add in the data fragments */
+	rc = rmnet_frag_descriptor_add_frags_from(new_desc, coal_desc, offset,
+						  dlen);
+	if (rc < 0)
+		goto recycle;
+
+	/* Update protocol-specific metadata */
+	if (coal_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th, __th;
+
+		th = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
+					   sizeof(*th), &__th);
+		if (!th)
+			goto recycle;
+
+		new_desc->tcp_seq_set = 1;
+		new_desc->tcp_seq = htonl(ntohl(th->seq) +
+					  coal_desc->data_offset);
+
+		/* Don't allow any dangerous flags to appear in any segments
+		 * other than the last.
+		 */
+		if (th->fin || th->psh) {
+			if (offset + dlen < coal_desc->len) {
+				__be32 flag_word = tcp_flag_word(th);
+
+				/* Clear the FIN and PSH flags from this
+				 * segment.
+				 */
+				flag_word &= ~TCP_FLAG_FIN;
+				flag_word &= ~TCP_FLAG_PSH;
+
+				new_desc->tcp_flags_set = 1;
+				new_desc->tcp_flags = *((__be16 *)&flag_word);
+			}
+		}
+	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh, __uh;
+
+		uh = rmnet_frag_header_ptr(coal_desc, coal_desc->ip_len,
+					   sizeof(*uh), &__uh);
+		if (!uh)
+			goto recycle;
+
+		if (coal_desc->ip_proto == 4 && !uh->check)
+			csum_valid = true;
+	}
+
+	if (coal_desc->ip_proto == 4) {
+		struct iphdr *iph, __iph;
+
+		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
+					    &__iph);
+		if (!iph)
+			goto recycle;
+
+		new_desc->ip_id_set = 1;
+		new_desc->ip_id = htons(ntohs(iph->id) + coal_desc->pkt_id);
+	}
+
+	new_desc->csum_valid = csum_valid;
+	priv->stats.coal.coal_reconstruct++;
+
+	/* Update meta information to move past the data we just segmented */
+	coal_desc->data_offset += dlen;
+	coal_desc->pkt_id = pkt_id + 1;
+	coal_desc->gso_segs = 0;
+
+	/* Only relevant for the first segment to avoid overcounting */
+	coal_desc->coal_bytes = 0;
+	coal_desc->coal_bufsize = 0;
+
+	list_add_tail(&new_desc->list, list);
+	return;
+
+recycle:
+	rmnet_recycle_frag_descriptor(new_desc, port);
+}
+
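+/* Software-verify the transport checksum of a descriptor whose headers and
+ * payload are linear in its first fragment, by folding the payload into the
+ * pseudo header checksum.
+ */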
+static bool rmnet_frag_validate_csum(struct rmnet_frag_descriptor *frag_desc)
+{
+	u8 *data = rmnet_frag_data_ptr(frag_desc);
+	unsigned int datagram_len;
+	__wsum csum;
+	__sum16 pseudo;
+
+	/* Keep analysis tools happy, since they will see that
+	 * rmnet_frag_data_ptr() could return NULL. It can't in this case,
+	 * since we can't get this far otherwise...
+	 */
+	if (unlikely(!data))
+		return false;
+
+	datagram_len = frag_desc->len - frag_desc->ip_len;
+	if (frag_desc->ip_proto == 4) {
+		struct iphdr *iph = (struct iphdr *)data;
+
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    datagram_len,
+					    frag_desc->trans_proto, 0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
+
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  datagram_len, frag_desc->trans_proto,
+					  0);
+	}
+
+	csum = csum_partial(data + frag_desc->ip_len, datagram_len,
+			    csum_unfold(pseudo));
+	return !csum_fold(csum);
+}
+
+/* Converts the coalesced frame into a list of descriptors */
+static void
+rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
+			     u64 nlo_err_mask, struct rmnet_port *port,
+			     struct list_head *list)
+{
+	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
+	struct rmnet_map_v5_coal_header coal_hdr;
+	struct rmnet_fragment *frag;
+	u8 *version;
+	u16 pkt_len;
+	u8 pkt, total_pkt = 0;
+	u8 nlo;
+	bool gro = coal_desc->dev->features & NETIF_F_GRO_HW;
+	bool zero_csum = false;
+
+	/* Copy the coal header into our local storage before pulling it. It's
+	 * possible that this header (or part of it) is the last part of a page
+	 * and pulling it off would cause it to be freed. Referring back to the
+	 * header would be invalid in that case.
+	 */
+	if (rmnet_frag_copy_data(coal_desc, sizeof(struct rmnet_map_header),
+				 sizeof(coal_hdr), &coal_hdr) < 0)
+		return;
+
+	/* Pull off the headers we no longer need */
+	if (!rmnet_frag_pull(coal_desc, port, sizeof(struct rmnet_map_header) +
+					      sizeof(coal_hdr)))
+		return;
+
+	/* By definition, this byte is linear, and the first byte on the
+	 * first fragment. ;) Hence why no header_ptr() call is needed
+	 * for it.
+	 */
+	version = rmnet_frag_data_ptr(coal_desc);
+	if (unlikely(!version))
+		return;
+
+	if ((*version & 0xF0) == 0x40) {
+		struct iphdr *iph, __iph;
+
+		iph = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*iph),
+					    &__iph);
+		if (!iph)
+			return;
+
+		coal_desc->ip_proto = 4;
+		coal_desc->ip_len = iph->ihl * 4;
+		coal_desc->trans_proto = iph->protocol;
+
+		/* Don't allow coalescing of any packets with IP options */
+		if (iph->ihl != 5)
+			gro = false;
+	} else if ((*version & 0xF0) == 0x60) {
+		struct ipv6hdr *ip6h, __ip6h;
+		int ip_len;
+		__be16 frag_off;
+		bool frag_hdr;
+		u8 protocol;
+
+		ip6h = rmnet_frag_header_ptr(coal_desc, 0, sizeof(*ip6h),
+					     &__ip6h);
+		if (!ip6h)
+			return;
+
+		coal_desc->ip_proto = 6;
+		protocol = ip6h->nexthdr;
+		ip_len = rmnet_frag_ipv6_skip_exthdr(coal_desc,
+						     sizeof(*ip6h),
+						     &protocol,
+						     &frag_off,
+						     &frag_hdr);
+		coal_desc->trans_proto = protocol;
+
+		/* If we run into a problem, or this is fragmented packet
+		 * (which should technically not be possible, if the HW
+		 * works as intended...), bail.
+		 */
+		if (ip_len < 0 || frag_off) {
+			priv->stats.coal.coal_ip_invalid++;
+			return;
+		}
+
+		if (frag_hdr) {
+			/* There is a fragment header, but this is not a
+			 * fragmented packet. We can handle this, but it
+			 * cannot be coalesced because of kernel limitations.
+			 */
+			gro = false;
+		}
+
+		coal_desc->ip_len = (u16)ip_len;
+		if (coal_desc->ip_len > sizeof(*ip6h)) {
+			/* Don't allow coalescing of any packets with IPv6
+			 * extension headers.
+			 */
+			gro = false;
+		}
+	} else {
+		priv->stats.coal.coal_ip_invalid++;
+		return;
+	}
+
+	if (coal_desc->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th, __th;
+
+		th = rmnet_frag_header_ptr(coal_desc,
+					   coal_desc->ip_len, sizeof(*th),
+					   &__th);
+		if (!th)
+			return;
+
+		coal_desc->trans_len = th->doff * 4;
+		priv->stats.coal.coal_tcp++;
+		priv->stats.coal.coal_tcp_bytes += coal_desc->len;
+	} else if (coal_desc->trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh, __uh;
+
+		uh = rmnet_frag_header_ptr(coal_desc,
+					   coal_desc->ip_len, sizeof(*uh),
+					   &__uh);
+		if (!uh)
+			return;
+
+		coal_desc->trans_len = sizeof(*uh);
+		priv->stats.coal.coal_udp++;
+		priv->stats.coal.coal_udp_bytes += coal_desc->len;
+		if (coal_desc->ip_proto == 4 && !uh->check)
+			zero_csum = true;
+	} else {
+		priv->stats.coal.coal_trans_invalid++;
+		return;
+	}
+
+	coal_desc->hdrs_valid = 1;
+	coal_desc->coal_bytes = coal_desc->len;
+	rmnet_descriptor_for_each_frag(frag, coal_desc)
+		coal_desc->coal_bufsize +=
+			page_size(skb_frag_page(&frag->frag));
+
+	if (rmnet_map_v5_csum_buggy(&coal_hdr) && !zero_csum) {
+		/* Mark the checksum as valid if it checks out */
+		if (rmnet_frag_validate_csum(coal_desc))
+			coal_desc->csum_valid = true;
+
+		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
+		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
+		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
+		list_add_tail(&coal_desc->list, list);
+		return;
+	}
+
+	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
+	 * no checksum errors, and are allowing GRO. We can just reuse this
+	 * descriptor unchanged.
+	 */
+	if (gro && coal_hdr.num_nlos == 1 && coal_hdr.csum_valid) {
+		coal_desc->csum_valid = true;
+		coal_desc->gso_size = ntohs(coal_hdr.nl_pairs[0].pkt_len);
+		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
+		coal_desc->gso_segs = coal_hdr.nl_pairs[0].num_packets;
+		list_add_tail(&coal_desc->list, list);
+		return;
+	}
+
+	/* Segment the coalesced descriptor into new packets */
+	for (nlo = 0; nlo < coal_hdr.num_nlos; nlo++) {
+		pkt_len = ntohs(coal_hdr.nl_pairs[nlo].pkt_len);
+		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
+		coal_desc->gso_size = pkt_len;
+		for (pkt = 0; pkt < coal_hdr.nl_pairs[nlo].num_packets;
+		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
+			bool csum_err = nlo_err_mask & 1;
+
+			/* Segment the packet if we're not sending the larger
+			 * packet up the stack.
+			 */
+			if (!gro) {
+				coal_desc->gso_segs = 1;
+				if (csum_err)
+					priv->stats.coal.coal_csum_err++;
+
+				__rmnet_frag_segment_data(coal_desc, port,
+							  list, total_pkt,
+							  !csum_err);
+				continue;
+			}
+
+			if (csum_err) {
+				priv->stats.coal.coal_csum_err++;
+
+				/* Segment out the good data */
+				if (coal_desc->gso_segs)
+					__rmnet_frag_segment_data(coal_desc,
+								  port,
+								  list,
+								  total_pkt,
+								  true);
+
+				/* Segment out the bad checksum */
+				coal_desc->gso_segs = 1;
+				__rmnet_frag_segment_data(coal_desc, port,
+							  list, total_pkt,
+							  false);
+			} else {
+				coal_desc->gso_segs++;
+			}
+		}
+
+		/* If we're switching NLOs, we need to send out everything from
+		 * the previous one, if we haven't done so. NLOs only switch
+		 * when the packet length changes.
+		 */
+		if (coal_desc->gso_segs)
+			__rmnet_frag_segment_data(coal_desc, port, list,
+						  total_pkt, true);
+	}
+}
+
+/* Record reason for coalescing pipe closure */
+static void rmnet_frag_data_log_close_stats(struct rmnet_priv *priv, u8 type,
+					    u8 code)
+{
+	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;
+
+	switch (type) {
+	case RMNET_MAP_COAL_CLOSE_NON_COAL:
+		stats->non_coal++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_IP_MISS:
+		stats->ip_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
+		stats->trans_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_HW:
+		switch (code) {
+		case RMNET_MAP_COAL_CLOSE_HW_NL:
+			stats->hw_nl++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_PKT:
+			stats->hw_pkt++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
+			stats->hw_byte++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_TIME:
+			stats->hw_time++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
+			stats->hw_evict++;
+			break;
+		default:
+			break;
+		}
+		break;
+	case RMNET_MAP_COAL_CLOSE_COAL:
+		stats->coal++;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Check if the coalesced header has any incorrect values, in which case, the
+ * entire coalesced frame must be dropped. Then check if there are any
+ * checksum issues
+ */
+static int
+rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
+				  u64 *nlo_err_mask)
+{
+	struct rmnet_map_v5_coal_header *coal_hdr, __coal_hdr;
+	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
+	u64 mask = 0;
+	int i;
+	u8 veid, pkts = 0;
+
+	coal_hdr = rmnet_frag_header_ptr(frag_desc,
+					 sizeof(struct rmnet_map_header),
+					 sizeof(*coal_hdr), &__coal_hdr);
+	if (!coal_hdr)
+		return -EINVAL;
+
+	veid = coal_hdr->virtual_channel_id;
+
+	if (coal_hdr->num_nlos == 0 ||
+	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
+		priv->stats.coal.coal_hdr_nlo_err++;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
+		/* If there is a checksum issue, we need to split
+		 * up the skb. Rebuild the full csum error field
+		 */
+		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
+		u8 pkt = coal_hdr->nl_pairs[i].num_packets;
+
+		mask |= ((u64)err) << (8 * i);
+
+		/* Track total packets in frame */
+		pkts += pkt;
+		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
+			priv->stats.coal.coal_hdr_pkt_err++;
+			return -EINVAL;
+		}
+	}
+
+	/* Track number of packets we get inside of coalesced frames */
+	priv->stats.coal.coal_pkts += pkts;
+
+	/* Update ethtool stats */
+	rmnet_frag_data_log_close_stats(priv,
+					coal_hdr->close_type,
+					coal_hdr->close_value);
+	if (veid < RMNET_MAX_VEID)
+		priv->stats.coal.coal_veid[veid]++;
+
+	*nlo_err_mask = mask;
+
+	return 0;
+}
+
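+/* Validate the transport checksum in software for a MAPv5 csum offload packet
+ * that hardware did not mark as valid. Returns 1 if the checksum is good,
+ * 0 if it is bad, and a negative error if the packet cannot be checksummed.
+ */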
+static int rmnet_frag_checksum_pkt(struct rmnet_frag_descriptor *frag_desc)
+{
+	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
+	struct rmnet_fragment *frag;
+	int offset = sizeof(struct rmnet_map_header) +
+		     sizeof(struct rmnet_map_v5_csum_header);
+	u8 *version, __version;
+	__wsum csum;
+	u16 csum_len;
+
+	version = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*version),
+					&__version);
+	if (!version)
+		return -EINVAL;
+
+	if ((*version & 0xF0) == 0x40) {
+		struct iphdr *iph;
+		u8 __iph[60]; /* Max IP header size (0xF * 4) */
+
+		/* We need to access the entire IP header including options
+		 * to validate its checksum. Fortunately, the version byte
+		 * also will tell us the length, so we only need to pull
+		 * once ;)
+		 */
+		frag_desc->ip_len = (*version & 0xF) * 4;
+		iph = rmnet_frag_header_ptr(frag_desc, offset,
+					    frag_desc->ip_len,
+					    __iph);
+		if (!iph || ip_is_fragment(iph))
+			return -EINVAL;
+
+		/* Length needs to be sensible */
+		csum_len = ntohs(iph->tot_len);
+		if (csum_len > frag_desc->len - offset)
+			return -EINVAL;
+
+		csum_len -= frag_desc->ip_len;
+		/* IPv4 checksum must be valid */
+		if (ip_fast_csum((u8 *)iph, iph->ihl)) {
+			priv->stats.csum_sw++;
+			return 0;
+		}
+
+		frag_desc->ip_proto = 4;
+		frag_desc->trans_proto = iph->protocol;
+		csum = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					  csum_len,
+					  frag_desc->trans_proto, 0);
+	} else if ((*version & 0xF0) == 0x60) {
+		struct ipv6hdr *ip6h, __ip6h;
+		int ip_len;
+		__be16 frag_off;
+		bool frag_hdr;
+		u8 protocol;
+
+		ip6h = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*ip6h),
+					     &__ip6h);
+		if (!ip6h)
+			return -EINVAL;
+
+		frag_desc->ip_proto = 6;
+		protocol = ip6h->nexthdr;
+		ip_len = rmnet_frag_ipv6_skip_exthdr(frag_desc,
+						     offset + sizeof(*ip6h),
+						     &protocol, &frag_off,
+						     &frag_hdr);
+		if (ip_len < 0 || frag_off)
+			return -EINVAL;
+
+		/* Length needs to be sensible */
+		frag_desc->ip_len = (u16)ip_len;
+		csum_len = ntohs(ip6h->payload_len);
+		if (csum_len + frag_desc->ip_len > frag_desc->len - offset)
+			return -EINVAL;
+
+		frag_desc->trans_proto = protocol;
+		csum = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					csum_len,
+					frag_desc->trans_proto, 0);
+	} else {
+		/* Not checksumable */
+		return -EINVAL;
+	}
+
+	/* Protocol check */
+	if (frag_desc->trans_proto != IPPROTO_TCP &&
+	    frag_desc->trans_proto != IPPROTO_UDP)
+		return -EINVAL;
+
+	offset += frag_desc->ip_len;
+	/* Check for UDP zero csum packets */
+	if (frag_desc->trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh, __uh;
+
+		uh = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*uh),
+					   &__uh);
+		if (!uh)
+			return -EINVAL;
+
+		if (!uh->check) {
+			if (frag_desc->ip_proto == 4) {
+				/* Zero checksum is valid */
+				priv->stats.csum_sw++;
+				return 1;
+			}
+
+			/* Not valid in IPv6 */
+			priv->stats.csum_sw++;
+			return 0;
+		}
+	}
+
+	/* Walk the frags and checksum each chunk */
+	list_for_each_entry(frag, &frag_desc->frags, list) {
+		u32 frag_size = skb_frag_size(&frag->frag);
+
+		if (!csum_len)
+			break;
+
+		if (offset < frag_size) {
+			void *addr = skb_frag_address(&frag->frag) + offset;
+			u32 len = min_t(u32, csum_len, frag_size - offset);
+
+			/* Checksum 'len' bytes and add them in */
+			csum = csum_partial(addr, len, csum);
+			csum_len -= len;
+			offset = 0;
+		} else {
+			offset -= frag_size;
+		}
+	}
+
+	priv->stats.csum_sw++;
+	return !csum_fold(csum);
+}
+
+/* Process a QMAPv5 packet header */
+int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port,
+				       struct list_head *list,
+				       u16 len)
+{
+	struct rmnet_map_v5_csum_header *csum_hdr, __csum_hdr;
+	struct rmnet_priv *priv = netdev_priv(frag_desc->dev);
+	u64 nlo_err_mask;
+	u32 offset = sizeof(struct rmnet_map_header);
+	int rc = 0;
+
+	/* Grab the header type. It's easier to grab enough for a full csum
+	 * offload header here since it's only 8 bytes and then check the
+	 * header type using that. This also doubles as a check to make sure
+	 * there's enough data after the QMAP header to ensure that another
+	 * header is present.
+	 */
+	csum_hdr = rmnet_frag_header_ptr(frag_desc, offset, sizeof(*csum_hdr),
+					 &__csum_hdr);
+	if (!csum_hdr)
+		return -EINVAL;
+
+	switch (csum_hdr->header_type) {
+	case RMNET_MAP_HEADER_TYPE_COALESCING:
+		priv->stats.coal.coal_rx++;
+		rc = rmnet_frag_data_check_coal_header(frag_desc,
+						       &nlo_err_mask);
+		if (rc)
+			return rc;
+
+		rmnet_frag_segment_coal_data(frag_desc, nlo_err_mask, port,
+					     list);
+		if (list_first_entry(list, struct rmnet_frag_descriptor,
+				     list) != frag_desc)
+			rmnet_recycle_frag_descriptor(frag_desc, port);
+		break;
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (unlikely(!(frag_desc->dev->features & NETIF_F_RXCSUM))) {
+			priv->stats.csum_sw++;
+		} else if (csum_hdr->csum_valid_required) {
+			priv->stats.csum_ok++;
+			frag_desc->csum_valid = true;
+		} else {
+			int valid = rmnet_frag_checksum_pkt(frag_desc);
+
+			if (valid < 0) {
+				priv->stats.csum_validation_failed++;
+			} else if (valid) {
+				/* All's good */
+				priv->stats.csum_ok++;
+				frag_desc->csum_valid = true;
+			} else {
+				/* Checksum is actually bad */
+				priv->stats.csum_valid_unset++;
+			}
+		}
+
+		if (!rmnet_frag_pull(frag_desc, port,
+				     offset + sizeof(*csum_hdr))) {
+			rc = -EINVAL;
+			break;
+		}
+
+		/* Remove padding only for csum offload packets.
+		 * Coalesced packets should never have padding.
+		 */
+		if (!rmnet_frag_trim(frag_desc, port, len)) {
+			rc = -EINVAL;
+			break;
+		}
+
+		list_del_init(&frag_desc->list);
+		list_add_tail(&frag_desc->list, list);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/* Perf hook handler */
+rmnet_perf_desc_hook_t rmnet_perf_desc_entry __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_desc_entry);
+
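+/* Process one deaggregated MAP packet: handle command frames, resolve the
+ * destination device from the mux ID, strip the MAP/MAPv5 headers, and hand
+ * the resulting descriptors to rmnet_perf (if loaded) or deliver them
+ * directly.
+ */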
+static void
+__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
+			     struct rmnet_port *port)
+{
+	rmnet_perf_desc_hook_t rmnet_perf_ingress;
+	struct rmnet_map_header *qmap, __qmap;
+	struct rmnet_endpoint *ep;
+	struct rmnet_frag_descriptor *frag, *tmp;
+	LIST_HEAD(segs);
+	u16 len, pad;
+	u8 mux_id;
+	bool skip_perf = (frag_desc->priority == 0xda1a);
+
+	qmap = rmnet_frag_header_ptr(frag_desc, 0, sizeof(*qmap), &__qmap);
+	if (!qmap)
+		goto recycle;
+
+	mux_id = qmap->mux_id;
+	pad = qmap->pad_len;
+	len = ntohs(qmap->pkt_len) - pad;
+
+	if (qmap->cd_bit) {
+		qmi_rmnet_set_dl_msg_active(port);
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			rmnet_frag_flow_command(frag_desc, port, len);
+			goto recycle;
+		}
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+			rmnet_frag_command(frag_desc, qmap, port);
+
+		goto recycle;
+	}
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto recycle;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto recycle;
+
+	frag_desc->dev = ep->egress_dev;
+
+	/* Handle QMAPv5 packet */
+	if (qmap->next_hdr &&
+	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
+		if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
+						       len))
+			goto recycle;
+	} else {
+		/* We only have the main QMAP header to worry about */
+		if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
+			return;
+
+		if (!rmnet_frag_trim(frag_desc, port, len))
+			return;
+
+		list_add_tail(&frag_desc->list, &segs);
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port,
+			list_first_entry_or_null(&segs,
+						 struct rmnet_frag_descriptor,
+						 list),
+			NULL);
+
+	if (skip_perf)
+		goto no_perf;
+
+	rcu_read_lock();
+	rmnet_perf_ingress = rcu_dereference(rmnet_perf_desc_entry);
+	if (rmnet_perf_ingress) {
+		list_for_each_entry_safe(frag, tmp, &segs, list) {
+			list_del_init(&frag->list);
+			rmnet_perf_ingress(frag, port);
+		}
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+no_perf:
+	list_for_each_entry_safe(frag, tmp, &segs, list) {
+		list_del_init(&frag->list);
+		rmnet_frag_deliver(frag, port);
+	}
+	return;
+
+recycle:
+	rmnet_recycle_frag_descriptor(frag_desc, port);
+}
+
+/* Notify perf at the end of SKB chain */
+rmnet_perf_chain_hook_t rmnet_perf_chain_end __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_chain_end);
+
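+/* Accumulate the ingress chain length into the matching dl_chain_stat bucket
+ * (bucket width 10, with 60 and above folded into the last bucket).
+ */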
+void rmnet_descriptor_classify_chain_count(u64 chain_count,
+					   struct rmnet_port *port)
+{
+	u64 index;
+
+	if (chain_count >= 60) {
+		port->stats.dl_chain_stat[6] += chain_count;
+		return;
+	}
+
+	index = chain_count;
+	do_div(index, 10);
+	port->stats.dl_chain_stat[index] += chain_count;
+}
+
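+/* Accumulate the per-skb page fragment count into the dl_frag_stat buckets
+ * (single-fragment skbs counted separately, bucket width 4, 16 and above
+ * folded into the last bucket).
+ */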
+void rmnet_descriptor_classify_frag_count(u64 frag_count,
+					  struct rmnet_port *port)
+{
+	u64 index;
+
+	if (frag_count <= 1) {
+		port->stats.dl_frag_stat_1 += frag_count;
+		return;
+	}
+
+	if (frag_count >= 16) {
+		port->stats.dl_frag_stat[4] += frag_count;
+		return;
+	}
+
+	index = frag_count;
+	do_div(index, 4);
+	port->stats.dl_frag_stat[index] += frag_count;
+}
+
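+/* Descriptor-path ingress entry point: walk the skb chain, deaggregate each
+ * skb into frag descriptors, process each descriptor, and then notify the
+ * rmnet_perf chain-end hook (skipped for low-latency traffic).
+ */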
+void rmnet_frag_ingress_handler(struct sk_buff *skb,
+				struct rmnet_port *port)
+{
+	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
+	LIST_HEAD(desc_list);
+	bool skip_perf = (skb->priority == 0xda1a);
+	u64 chain_count = 0;
+	struct sk_buff *head = skb;
+
+	/* Deaggregation and freeing of HW originating
+	 * buffers is done within here
+	 */
+	while (skb) {
+		struct sk_buff *skb_frag;
+
+		chain_count++;
+		rmnet_descriptor_classify_frag_count(skb_shinfo(skb)->nr_frags,
+						     port);
+
+		rmnet_frag_deaggregate(skb, port, &desc_list, skb->priority);
+		if (!list_empty(&desc_list)) {
+			struct rmnet_frag_descriptor *frag_desc, *tmp;
+
+			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
+						 list) {
+				list_del_init(&frag_desc->list);
+				__rmnet_frag_ingress_handler(frag_desc, port);
+			}
+		}
+
+		if (skb == head) {
+			skb_frag = skb_shinfo(skb)->frag_list;
+			skb_shinfo(skb)->frag_list = NULL;
+		} else {
+			skb_frag = skb->next;
+			skb->next = NULL;
+		}
+
+		consume_skb(skb);
+		skb = skb_frag;
+	}
+
+	rmnet_descriptor_classify_chain_count(chain_count, port);
+
+	if (skip_perf)
+		return;
+
+	rcu_read_lock();
+	rmnet_perf_opt_chain_end = rcu_dereference(rmnet_perf_chain_end);
+	if (rmnet_perf_opt_chain_end)
+		rmnet_perf_opt_chain_end();
+	rcu_read_unlock();
+}
+
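+/* Free every pooled descriptor along with the pool itself when the port is
+ * torn down.
+ */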
+void rmnet_descriptor_deinit(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool;
+	struct rmnet_frag_descriptor *frag_desc, *tmp;
+
+	pool = port->frag_desc_pool;
+	if (pool) {
+		list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
+			kfree(frag_desc);
+			pool->pool_size--;
+		}
+
+		kfree(pool);
+		port->frag_desc_pool = NULL;
+	}
+}
+
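+/* Allocate the per-port descriptor pool and pre-populate it with
+ * RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors.
+ */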
+int rmnet_descriptor_init(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool;
+	int i;
+
+	spin_lock_init(&port->desc_pool_lock);
+	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
+	if (!pool)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pool->free_list);
+	port->frag_desc_pool = pool;
+
+	for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
+		struct rmnet_frag_descriptor *frag_desc;
+
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			return -ENOMEM;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->frags);
+		list_add_tail(&frag_desc->list, &pool->free_list);
+		pool->pool_size++;
+	}
+
+	return 0;
+}

+ 119 - 0
qcom/opensource/datarmnet/core/rmnet_descriptor.h

@@ -0,0 +1,119 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#ifndef _RMNET_DESCRIPTOR_H_
+#define _RMNET_DESCRIPTOR_H_
+
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+
+struct rmnet_frag_descriptor_pool {
+	struct list_head free_list;
+	u32 pool_size;
+};
+
+struct rmnet_fragment {
+	struct list_head list;
+	skb_frag_t frag;
+};
+
+struct rmnet_frag_descriptor {
+	struct list_head list;
+	struct list_head frags;
+	struct net_device *dev;
+	u32 coal_bufsize;
+	u32 coal_bytes;
+	u32 len;
+	u32 hash;
+	u32 priority;
+	__be32 tcp_seq;
+	__be16 ip_id;
+	__be16 tcp_flags;
+	u16 data_offset;
+	u16 gso_size;
+	u16 gso_segs;
+	u16 ip_len;
+	u16 trans_len;
+	u8 ip_proto;
+	u8 trans_proto;
+	u8 pkt_id;
+	u8 csum_valid:1,
+	   hdrs_valid:1,
+	   ip_id_set:1,
+	   tcp_seq_set:1,
+	   flush_shs:1,
+	   tcp_flags_set:1,
+	   reserved:2;
+};
+
+/* Descriptor management */
+struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port);
+void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+				   struct rmnet_port *port);
+void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
+		      struct rmnet_port *port, unsigned int size);
+void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
+		      struct rmnet_port *port, unsigned int size);
+void *rmnet_frag_header_ptr(struct rmnet_frag_descriptor *frag_desc, u32 off,
+			    u32 len, void *buf);
+int rmnet_frag_descriptor_add_frag(struct rmnet_frag_descriptor *frag_desc,
+				   struct page *p, u32 page_offset, u32 len);
+int rmnet_frag_descriptor_add_frags_from(struct rmnet_frag_descriptor *to,
+					 struct rmnet_frag_descriptor *from,
+					 u32 off, u32 len);
+int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
+				int start, u8 *nexthdrp, __be16 *frag_offp,
+				bool *frag_hdrp);
+
+/* QMAP command packets */
+void rmnet_frag_command(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_map_header *qmap, struct rmnet_port *port);
+int rmnet_frag_flow_command(struct rmnet_frag_descriptor *frag_desc,
+			    struct rmnet_port *port, u16 pkt_len);
+
+/* Ingress data handlers */
+void rmnet_frag_deaggregate(struct sk_buff *skb, struct rmnet_port *port,
+			    struct list_head *list, u32 priority);
+void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
+			struct rmnet_port *port);
+int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port,
+				       struct list_head *list,
+				       u16 len);
+void rmnet_frag_ingress_handler(struct sk_buff *skb,
+				struct rmnet_port *port);
+
+int rmnet_descriptor_init(struct rmnet_port *port);
+void rmnet_descriptor_deinit(struct rmnet_port *port);
+
+static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
+{
+	struct rmnet_fragment *frag;
+
+	frag = list_first_entry_or_null(&frag_desc->frags,
+					struct rmnet_fragment, list);
+
+	if (!frag)
+		return NULL;
+
+	return skb_frag_address(&frag->frag);
+}
+
+#endif /* _RMNET_DESCRIPTOR_H_ */
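
A hypothetical caller of the accessors declared above, checking the IP version of a possibly non-contiguous descriptor. Based on how the ingress code uses it, rmnet_frag_header_ptr() is assumed to return a direct pointer when the requested bytes are contiguous and to copy them into the caller-supplied buffer otherwise:

/* example_desc_is_ipv4() is illustrative only */
static bool example_desc_is_ipv4(struct rmnet_frag_descriptor *frag_desc)
{
	u8 buf;
	u8 *first_byte;

	/* Direct pointer when contiguous, otherwise (by assumption) a copy
	 * into 'buf'; NULL if the range is out of bounds.
	 */
	first_byte = rmnet_frag_header_ptr(frag_desc, 0, sizeof(buf), &buf);
	if (!first_byte)
		return false;

	return (*first_byte & 0xF0) == 0x40;	/* IPv4 version nibble */
}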

+ 463 - 0
qcom/opensource/datarmnet/core/rmnet_genl.c

@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Generic Netlink
+ *
+ */
+
+#include "rmnet_genl.h"
+#include <net/sock.h>
+#include <linux/skbuff.h>
+#include <linux/ktime.h>
+
+#define RMNET_CORE_GENL_MAX_STR_LEN	255
+
+/* Static Functions and Definitions */
+static struct nla_policy rmnet_genl_attr_policy[RMNET_CORE_GENL_ATTR_MAX +
+						1] = {
+	[RMNET_CORE_GENL_ATTR_INT]  = { .type = NLA_S32 },
+	[RMNET_CORE_GENL_ATTR_PID_BPS] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_bps_resp)),
+	[RMNET_CORE_GENL_ATTR_PID_BOOST] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_pid_boost_req)),
+	[RMNET_CORE_GENL_ATTR_TETHER_INFO] = NLA_POLICY_EXACT_LEN(sizeof(struct rmnet_core_tether_info_req)),
+	[RMNET_CORE_GENL_ATTR_STR]  = { .type = NLA_NUL_STRING, .len =
+				RMNET_CORE_GENL_MAX_STR_LEN },
+};
+
+#define RMNET_CORE_GENL_OP(_cmd, _func)			\
+	{						\
+		.cmd	= _cmd,				\
+		.doit	= _func,			\
+		.dumpit	= NULL,				\
+		.flags	= 0,				\
+	}
+
+static const struct genl_ops rmnet_core_genl_ops[] = {
+	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BPS_REQ,
+			   rmnet_core_genl_pid_bps_req_hdlr),
+	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
+			   rmnet_core_genl_pid_boost_req_hdlr),
+	RMNET_CORE_GENL_OP(RMNET_CORE_GENL_CMD_TETHER_INFO_REQ,
+			   rmnet_core_genl_tether_info_req_hdlr),
+};
+
+struct genl_family rmnet_core_genl_family = {
+	.hdrsize = 0,
+	.name    = RMNET_CORE_GENL_FAMILY_NAME,
+	.version = RMNET_CORE_GENL_VERSION,
+	.maxattr = RMNET_CORE_GENL_ATTR_MAX,
+	.policy  = rmnet_genl_attr_policy,
+	.ops     = rmnet_core_genl_ops,
+	.n_ops   = ARRAY_SIZE(rmnet_core_genl_ops),
+};
+
+#define RMNET_PID_STATS_HT_SIZE (8)
+#define RMNET_PID_STATS_HT rmnet_pid_ht
+DEFINE_HASHTABLE(rmnet_pid_ht, RMNET_PID_STATS_HT_SIZE);
+
+/* Spinlock definition for pid hash table */
+static DEFINE_SPINLOCK(rmnet_pid_ht_splock);
+
+#define RMNET_GENL_SEC_TO_MSEC(x)   ((x) * 1000)
+#define RMNET_GENL_SEC_TO_NSEC(x)   ((x) * 1000000000)
+#define RMNET_GENL_BYTES_TO_BITS(x) ((x) * 8)
+#define RMNET_GENL_NSEC_TO_SEC(x) ({\
+	u64 __quotient = (x); \
+	do_div(__quotient, 1000000000); \
+	__quotient; \
+})
+
+int rmnet_core_userspace_connected;
+#define RMNET_QUERY_PERIOD_SEC (1) /* Period of pid/bps queries */
+
+struct rmnet_pid_node_s {
+	struct hlist_node list;
+	ktime_t timstamp_last_query;
+	u64 tx_bytes;
+	u64 tx_bytes_last_query;
+	u64 tx_bps;
+	u64 sched_boost_period_ms;
+	int sched_boost_remaining_ms;
+	int sched_boost_enable;
+	pid_t pid;
+};
+
+typedef void (*rmnet_perf_tether_cmd_hook_t)(u8 message, u64 val);
+rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd_hook __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_tether_cmd_hook);
+
+void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
+				      int *boost_enable, u64 *boost_period)
+{
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+	u8 is_match_found = 0;
+	u64 tx_bytes = 0;
+
+	*boost_enable = 0;
+	*boost_period = 0;
+
+	/* Use do-while(0) so the lock is taken and released exactly once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
+					    list, pid) {
+			if (pid != node_p->pid)
+				continue;
+
+			/* PID Match found */
+			is_match_found = 1;
+			node_p->tx_bytes += len;
+			tx_bytes = node_p->tx_bytes;
+
+			if (node_p->sched_boost_enable) {
+				rm_err("boost triggered for pid %d",
+				       pid);
+				/* Boost was just triggered; don't re-trigger */
+				node_p->sched_boost_enable = 0;
+				*boost_enable = 1;
+				*boost_period = node_p->sched_boost_period_ms;
+				node_p->sched_boost_remaining_ms =
+							(int)*boost_period;
+			}
+
+			break;
+		}
+
+		if (is_match_found)
+			break;
+
+		/* No PID match */
+		node_p = kzalloc(sizeof(*node_p), GFP_ATOMIC);
+		if (!node_p)
+			break;
+
+		node_p->pid = pid;
+		node_p->tx_bytes = len;
+		node_p->sched_boost_enable = 0;
+		node_p->sched_boost_period_ms = 0;
+		node_p->sched_boost_remaining_ms = 0;
+		hash_add_rcu(RMNET_PID_STATS_HT, &node_p->list, pid);
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+}
+
+void rmnet_boost_for_pid(pid_t pid, int boost_enable,
+			 u64 boost_period)
+{
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+
+	/* Use do-while(0) so the lock is taken and released exactly once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		hash_for_each_possible_safe(RMNET_PID_STATS_HT, node_p, tmp,
+					    list, pid) {
+			if (pid != node_p->pid)
+				continue;
+
+			/* PID Match found */
+			rm_err("CORE_BOOST: enable boost for pid %d for %llu ms",
+			       pid, boost_period);
+			node_p->sched_boost_enable = boost_enable;
+			node_p->sched_boost_period_ms = boost_period;
+			break;
+		}
+
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+}
+
+static void rmnet_create_pid_bps_resp(struct rmnet_core_pid_bps_resp
+				      *pid_bps_resp_ptr)
+{
+	struct timespec64 time;
+	struct hlist_node *tmp;
+	struct rmnet_pid_node_s *node_p;
+	unsigned long ht_flags;
+	u64 tx_bytes_cur, byte_diff, time_diff_ns, tmp_bits;
+	int i;
+	u16 bkt;
+
+	ktime_get_real_ts64(&time);
+	pid_bps_resp_ptr->timestamp = RMNET_GENL_SEC_TO_NSEC(time.tv_sec) +
+		   time.tv_nsec;
+
+	/* Use do-while(0) so the lock is taken and released exactly once */
+	spin_lock_irqsave(&rmnet_pid_ht_splock, ht_flags);
+	do {
+		i = 0;
+
+		hash_for_each_safe(RMNET_PID_STATS_HT, bkt, tmp,
+				   node_p, list) {
+			tx_bytes_cur = node_p->tx_bytes;
+			if (tx_bytes_cur <= node_p->tx_bytes_last_query) {
+				/* Don't send inactive PIDs to userspace */
+				hash_del(&node_p->list);
+				kfree(node_p);
+				continue;
+			}
+
+			/* Compute bits per second */
+			byte_diff = (node_p->tx_bytes -
+				     node_p->tx_bytes_last_query);
+			time_diff_ns = (pid_bps_resp_ptr->timestamp -
+					node_p->timstamp_last_query);
+
+			tmp_bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);
+			/* Note that do_div() returns the remainder and stores
+			 * the quotient back in its numerator argument. Since
+			 * do_div() modifies the numerator in place, tmp_bits
+			 * is used as a scratch copy.
+			 */
+			do_div(tmp_bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));
+			node_p->tx_bps = tmp_bits;
+
+			if (node_p->sched_boost_remaining_ms >=
+			    RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC)) {
+				node_p->sched_boost_remaining_ms -=
+				RMNET_GENL_SEC_TO_MSEC(RMNET_QUERY_PERIOD_SEC);
+
+				rm_err("CORE_BOOST: enabling boost for pid %d\n"
+				       "sched boost remaining = %d ms",
+				       node_p->pid,
+				       node_p->sched_boost_remaining_ms);
+			} else {
+				node_p->sched_boost_remaining_ms = 0;
+			}
+
+			pid_bps_resp_ptr->list[i].pid = node_p->pid;
+			pid_bps_resp_ptr->list[i].tx_bps = node_p->tx_bps;
+			pid_bps_resp_ptr->list[i].boost_remaining_ms =
+					node_p->sched_boost_remaining_ms;
+
+			node_p->timstamp_last_query =
+				pid_bps_resp_ptr->timestamp;
+			node_p->tx_bytes_last_query = tx_bytes_cur;
+			i++;
+
+			/* Support copying up to 32 active pids */
+			if (i >= RMNET_CORE_GENL_MAX_PIDS)
+				break;
+		}
+		break;
+	} while (0);
+	spin_unlock_irqrestore(&rmnet_pid_ht_splock, ht_flags);
+
+	pid_bps_resp_ptr->list_len = i;
+}
+
+int rmnet_core_genl_send_resp(struct genl_info *info,
+			      struct rmnet_core_pid_bps_resp *pid_bps_resp)
+{
+	struct sk_buff *skb;
+	void *msg_head;
+	int rc;
+
+	if (!info || !pid_bps_resp) {
+		rm_err("%s", "SHS_GNL: Invalid params\n");
+		goto out;
+	}
+
+	skb = genlmsg_new(sizeof(struct rmnet_core_pid_bps_resp), GFP_KERNEL);
+	if (!skb)
+		goto out;
+
+	msg_head = genlmsg_put(skb, 0, info->snd_seq + 1,
+			       &rmnet_core_genl_family,
+			       0, RMNET_CORE_GENL_CMD_PID_BPS_REQ);
+	if (!msg_head) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	rc = nla_put(skb, RMNET_CORE_GENL_ATTR_PID_BPS,
+		     sizeof(struct rmnet_core_pid_bps_resp),
+		     pid_bps_resp);
+	if (rc != 0)
+		goto out;
+
+	genlmsg_end(skb, msg_head);
+
+	rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+	if (rc != 0)
+		goto out;
+
+	rm_err("%s", "SHS_GNL: Successfully sent pid/bytes info\n");
+	return RMNET_GENL_SUCCESS;
+
+out:
+	/* TODO: Need to free skb?? */
+	rm_err("%s", "SHS_GNL: FAILED to send pid/bytes info\n");
+	rmnet_core_userspace_connected = 0;
+	return RMNET_GENL_FAILURE;
+}
+
+int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
+				     struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_core_pid_bps_req  pid_bps_req;
+	struct rmnet_core_pid_bps_resp pid_bps_resp;
+	int is_req_valid = 0;
+
+	rm_err("CORE_GNL: %s connected = %d", __func__,
+	       rmnet_core_userspace_connected);
+
+	if (!info) {
+		rm_err("%s", "CORE_GNL: error - info is null");
+		pid_bps_resp.valid = 0;
+	} else {
+		na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BPS];
+		if (na) {
+			if (nla_memcpy(&pid_bps_req, na,
+				       sizeof(pid_bps_req)) > 0) {
+				is_req_valid = 1;
+			} else {
+				rm_err("CORE_GNL: nla_memcpy failed %d\n",
+				       RMNET_CORE_GENL_ATTR_PID_BPS);
+			}
+		} else {
+			rm_err("CORE_GNL: no info->attrs %d\n",
+			       RMNET_CORE_GENL_ATTR_PID_BPS);
+		}
+	}
+
+	if (!rmnet_core_userspace_connected)
+		rmnet_core_userspace_connected = 1;
+
+	/* Copy the pid/byte list into the payload */
+	memset(&pid_bps_resp, 0x0,
+	       sizeof(pid_bps_resp));
+	if (is_req_valid) {
+		rmnet_create_pid_bps_resp(&pid_bps_resp);
+	}
+	pid_bps_resp.valid = 1;
+
+	rmnet_core_genl_send_resp(info, &pid_bps_resp);
+
+	return RMNET_GENL_SUCCESS;
+}
+
+int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
+				       struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_core_pid_boost_req pid_boost_req;
+	int is_req_valid = 0;
+	u16 boost_pid_cnt = RMNET_CORE_GENL_MAX_PIDS;
+	u16 i = 0;
+
+	rm_err("CORE_GNL: %s", __func__);
+
+	if (!info) {
+		rm_err("%s", "CORE_GNL: error - info is null");
+		return RMNET_GENL_FAILURE;
+	}
+
+	na = info->attrs[RMNET_CORE_GENL_ATTR_PID_BOOST];
+	if (na) {
+		if (nla_memcpy(&pid_boost_req, na, sizeof(pid_boost_req)) > 0) {
+			is_req_valid = 1;
+		} else {
+			rm_err("CORE_GNL: nla_memcpy failed %d\n",
+			       RMNET_CORE_GENL_ATTR_PID_BOOST);
+			return RMNET_GENL_FAILURE;
+		}
+	} else {
+		rm_err("CORE_GNL: no info->attrs %d\n",
+		       RMNET_CORE_GENL_ATTR_PID_BOOST);
+		return RMNET_GENL_FAILURE;
+	}
+
+	if (pid_boost_req.list_len < RMNET_CORE_GENL_MAX_PIDS)
+		boost_pid_cnt = pid_boost_req.list_len;
+
+	if (!pid_boost_req.valid)
+		boost_pid_cnt = 0;
+
+	for (i = 0; i < boost_pid_cnt; i++) {
+		if (pid_boost_req.list[i].boost_enabled) {
+			rmnet_boost_for_pid(pid_boost_req.list[i].pid, 1,
+					    pid_boost_req.list[i].boost_period);
+		}
+	}
+
+	return RMNET_GENL_SUCCESS;
+}
+
+int rmnet_core_genl_tether_info_req_hdlr(struct sk_buff *skb_2,
+				       struct genl_info *info)
+{
+	struct nlattr *na;
+	struct rmnet_core_tether_info_req tether_info_req;
+	int is_req_valid = 0;
+	rmnet_perf_tether_cmd_hook_t rmnet_perf_tether_cmd;
+
+	rm_err("CORE_GNL: %s", __func__);
+
+	if (!info) {
+		rm_err("%s", "CORE_GNL: error - info is null");
+		return RMNET_GENL_FAILURE;
+	}
+
+	na = info->attrs[RMNET_CORE_GENL_ATTR_TETHER_INFO];
+	if (na) {
+		if (nla_memcpy(&tether_info_req, na, sizeof(tether_info_req)) > 0) {
+			is_req_valid = 1;
+		} else {
+			rm_err("CORE_GNL: nla_memcpy failed %d\n",
+			       RMNET_CORE_GENL_ATTR_TETHER_INFO);
+			return RMNET_GENL_FAILURE;
+		}
+	} else {
+		rm_err("CORE_GNL: no info->attrs %d\n",
+		       RMNET_CORE_GENL_ATTR_TETHER_INFO);
+		return RMNET_GENL_FAILURE;
+	}
+
+	if (!tether_info_req.valid) {
+		rm_err("%s", "CORE_GNL: tether info req is invalid");
+		return RMNET_GENL_FAILURE;
+	}
+
+	rmnet_perf_tether_cmd = rcu_dereference(rmnet_perf_tether_cmd_hook);
+	if (rmnet_perf_tether_cmd)
+		rmnet_perf_tether_cmd(1, tether_info_req.tether_filters_en);
+
+	rm_err("CORE_GNL: tether filters %s",
+	       tether_info_req.tether_filters_en ? "enabled" : "disabled");
+
+	return RMNET_GENL_SUCCESS;
+}
+
+/* register new rmnet core driver generic netlink family */
+int rmnet_core_genl_init(void)
+{
+	int ret;
+
+	ret = genl_register_family(&rmnet_core_genl_family);
+	if (ret != 0) {
+		rm_err("CORE_GNL: register family failed: %i", ret);
+		genl_unregister_family(&rmnet_core_genl_family);
+		return RMNET_GENL_FAILURE;
+	}
+
+	rm_err("CORE_GNL: successfully registered generic netlink family: %s",
+	       RMNET_CORE_GENL_FAMILY_NAME);
+
+	return RMNET_GENL_SUCCESS;
+}
+
+/* Unregister the generic netlink family */
+int rmnet_core_genl_deinit(void)
+{
+	int ret;
+
+	ret = genl_unregister_family(&rmnet_core_genl_family);
+	if (ret != 0)
+		rm_err("CORE_GNL: unregister family failed: %i\n", ret);
+
+	return RMNET_GENL_SUCCESS;
+}
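
To make the do_div() convention used in rmnet_create_pid_bps_resp() above concrete, the bits-per-second math reduces to the following sketch, reusing the macros defined earlier in this file:

/* e.g. 125000000 bytes over 2 s of elapsed time -> 500000000 bps */
static u64 example_tx_bps(u64 byte_diff, u64 time_diff_ns)
{
	u64 bits = RMNET_GENL_BYTES_TO_BITS(byte_diff);

	/* do_div() divides 'bits' in place and returns the remainder,
	 * so the quotient must be read back from 'bits' itself.
	 */
	do_div(bits, RMNET_GENL_NSEC_TO_SEC(time_diff_ns));

	return bits;
}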

+ 107 - 0
qcom/opensource/datarmnet/core/rmnet_genl.h

@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Generic Netlink
+ *
+ */
+
+#ifndef _RMNET_GENL_H_
+#define _RMNET_GENL_H_
+
+#include <net/genetlink.h>
+
+#define RMNET_CORE_DEBUG 0
+
+#define rm_err(fmt, ...)  \
+	do { if (RMNET_CORE_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
+
+/* Generic Netlink Definitions */
+#define RMNET_CORE_GENL_VERSION 1
+#define RMNET_CORE_GENL_FAMILY_NAME "RMNET_CORE"
+
+#define RMNET_CORE_GENL_MAX_PIDS 32
+
+#define RMNET_GENL_SUCCESS (0)
+#define RMNET_GENL_FAILURE (-1)
+
+extern int rmnet_core_userspace_connected;
+
+enum {
+	RMNET_CORE_GENL_CMD_UNSPEC,
+	RMNET_CORE_GENL_CMD_PID_BPS_REQ,
+	RMNET_CORE_GENL_CMD_PID_BOOST_REQ,
+	RMNET_CORE_GENL_CMD_TETHER_INFO_REQ,
+	__RMNET_CORE_GENL_CMD_MAX,
+};
+
+enum {
+	RMNET_CORE_GENL_ATTR_UNSPEC,
+	RMNET_CORE_GENL_ATTR_STR,
+	RMNET_CORE_GENL_ATTR_INT,
+	RMNET_CORE_GENL_ATTR_PID_BPS,
+	RMNET_CORE_GENL_ATTR_PID_BOOST,
+	RMNET_CORE_GENL_ATTR_TETHER_INFO,
+	__RMNET_CORE_GENL_ATTR_MAX,
+};
+
+#define RMNET_CORE_GENL_ATTR_MAX (__RMNET_CORE_GENL_ATTR_MAX - 1)
+
+struct rmnet_core_pid_bps_info {
+	u64 tx_bps;
+	u32 pid;
+	u32 boost_remaining_ms;
+};
+
+struct rmnet_core_pid_boost_info {
+	u32 boost_enabled;
+	/* Boost period in ms */
+	u32 boost_period;
+	u32 pid;
+};
+
+struct rmnet_core_pid_bps_req {
+	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+struct rmnet_core_pid_bps_resp {
+	struct rmnet_core_pid_bps_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+struct rmnet_core_pid_boost_req {
+	struct rmnet_core_pid_boost_info list[RMNET_CORE_GENL_MAX_PIDS];
+	u64 timestamp;
+	u16 list_len;
+	u8 valid;
+};
+
+/* Tether Info Request Structure */
+struct rmnet_core_tether_info_req {
+	uint8_t tether_filters_en;
+	uint8_t valid;
+};
+
+/* Function Prototypes */
+int rmnet_core_genl_pid_bps_req_hdlr(struct sk_buff *skb_2,
+				     struct genl_info *info);
+
+int rmnet_core_genl_pid_boost_req_hdlr(struct sk_buff *skb_2,
+				       struct genl_info *info);
+
+int rmnet_core_genl_tether_info_req_hdlr(struct sk_buff *skb_2,
+					 struct genl_info *info);
+
+/* Called by vnd select queue */
+void rmnet_update_pid_and_check_boost(pid_t pid, unsigned int len,
+				      int *boost_enable, u64 *boost_period);
+
+int rmnet_core_genl_init(void);
+
+int rmnet_core_genl_deinit(void);
+
+#endif /* _RMNET_GENL_H_ */
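
For reference, a hedged userspace sketch of driving this family with libnl-3 (an assumption; nothing below ships with this driver). It resolves RMNET_CORE, sends a PID/bps query, and expects a unicast reply carrying struct rmnet_core_pid_bps_resp in RMNET_CORE_GENL_ATTR_PID_BPS; the structures above would need to be mirrored with userspace integer types:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Hypothetical helper; reply parsing and detailed error handling omitted */
static int example_query_pid_bps(void)
{
	struct rmnet_core_pid_bps_req req = { .valid = 1 };
	struct nl_sock *sk;
	struct nl_msg *msg = NULL;
	int family, rc = -1;

	sk = nl_socket_alloc();
	if (!sk || genl_connect(sk))
		goto out;

	family = genl_ctrl_resolve(sk, RMNET_CORE_GENL_FAMILY_NAME);
	if (family < 0)
		goto out;

	msg = nlmsg_alloc();
	if (!msg)
		goto out;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    RMNET_CORE_GENL_CMD_PID_BPS_REQ, RMNET_CORE_GENL_VERSION);
	nla_put(msg, RMNET_CORE_GENL_ATTR_PID_BPS, sizeof(req), &req);

	rc = nl_send_auto(sk, msg);
	nlmsg_free(msg);
out:
	if (sk)
		nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}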

+ 551 - 0
qcom/opensource/datarmnet/core/rmnet_handlers.c

@@ -0,0 +1,551 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inet.h>
+#include <net/sock.h>
+#include <linux/tracepoint.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_handlers.h"
+#include "rmnet_descriptor.h"
+#include "rmnet_ll.h"
+#include "rmnet_module.h"
+
+
+#include "rmnet_qmi.h"
+#include "qmi_rmnet.h"
+
+#define RMNET_IP_VERSION_4 0x40
+#define RMNET_IP_VERSION_6 0x60
+#define CREATE_TRACE_POINTS
+#include "rmnet_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_update);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_reset);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_freq_boost);
+EXPORT_TRACEPOINT_SYMBOL(print_icmp_rx);
+
+
+
+/* Helper Functions */
+
+void rmnet_set_skb_proto(struct sk_buff *skb)
+{
+	switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
+	case RMNET_IP_VERSION_4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case RMNET_IP_VERSION_6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		skb->protocol = htons(ETH_P_MAP);
+		break;
+	}
+}
+EXPORT_SYMBOL(rmnet_set_skb_proto);
+
+bool (*rmnet_shs_slow_start_detect)(u32 hash_key) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_slow_start_detect);
+
+bool rmnet_slow_start_on(u32 hash_key)
+{
+	bool (*rmnet_shs_slow_start_on)(u32 hash_key);
+
+	rmnet_shs_slow_start_on = rcu_dereference(rmnet_shs_slow_start_detect);
+	if (rmnet_shs_slow_start_on)
+		return rmnet_shs_slow_start_on(hash_key);
+
+	return false;
+}
+EXPORT_SYMBOL(rmnet_slow_start_on);
+
+/* SHS hook handler */
+int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
+			   struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry);
+
+int (*rmnet_shs_switch)(struct sk_buff *skb,
+			   struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_switch);
+
+
+/* SHS hook handler for work queue */
+int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
+			      struct rmnet_shs_clnt_s *cfg) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
+
+/* Generic handler */
+
+void
+rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
+{
+	int (*rmnet_shs_stamp)(struct sk_buff *skb,
+			       struct rmnet_shs_clnt_s *cfg);
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+			0xDEF, 0xDEF, (void *)skb, NULL);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+
+	/* Low latency packets use a different balancing scheme */
+	if (skb->priority == 0xda1a)
+		goto skip_shs;
+
+	rcu_read_lock();
+	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
+	if (rmnet_shs_stamp) {
+		rmnet_shs_stamp(skb, &port->shs_cfg);
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+skip_shs:
+	if (rmnet_module_hook_shs_skb_ll_entry(NULL, skb, &port->shs_cfg))
+		return;
+
+	netif_receive_skb(skb);
+}
+EXPORT_SYMBOL(rmnet_deliver_skb);
+
+/* Important to note, port cannot be used here if it has gone stale */
+void
+rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+		     enum rmnet_packet_context ctx)
+{
+	int (*rmnet_shs_stamp)(struct sk_buff *skb,
+			       struct rmnet_shs_clnt_s *cfg);
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+			0xDEF, 0xDEF, (void *)skb, NULL);
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+
+	/* Packets coming from work queue context due to the packet flush timer
+	 * must go through the special workqueue path in the SHS driver
+	 */
+	rcu_read_lock();
+	rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
+				   rcu_dereference(rmnet_shs_skb_entry_wq);
+	if (rmnet_shs_stamp) {
+		rmnet_shs_stamp(skb, &port->shs_cfg);
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+	if (ctx == RMNET_NET_RX_CTX)
+		netif_receive_skb(skb);
+	else
+		gro_cells_receive(&priv->gro_cells, skb);
+}
+EXPORT_SYMBOL(rmnet_deliver_skb_wq);
+
+/* Deliver a list of skbs after undoing coalescing */
+static void rmnet_deliver_skb_list(struct sk_buff_head *head,
+				   struct rmnet_port *port)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(head))) {
+		rmnet_set_skb_proto(skb);
+		rmnet_deliver_skb(skb, port);
+	}
+}
+
+/* MAP handler */
+
+static void
+__rmnet_map_ingress_handler(struct sk_buff *skb,
+			    struct rmnet_port *port)
+{
+	struct rmnet_map_header *qmap;
+	struct rmnet_endpoint *ep;
+	struct sk_buff_head list;
+	u16 len, pad;
+	u8 mux_id;
+
+	/* We don't need the spinlock since only we touch this */
+	__skb_queue_head_init(&list);
+
+	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+	if (qmap->cd_bit) {
+		qmi_rmnet_set_dl_msg_active(port);
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			if (!rmnet_map_flow_command(skb, port, false))
+				return;
+		}
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+			return rmnet_map_command(skb, port);
+
+		goto free_skb;
+	}
+
+	mux_id = qmap->mux_id;
+	pad = qmap->pad_len;
+	len = ntohs(qmap->pkt_len) - pad;
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto free_skb;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto free_skb;
+
+	skb->dev = ep->egress_dev;
+
+	/* Handle QMAPv5 packet */
+	if (qmap->next_hdr &&
+	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				  RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5))) {
+		if (rmnet_map_process_next_hdr_packet(skb, &list, len))
+			goto free_skb;
+	} else {
+		/* We only have the main QMAP header to worry about */
+		pskb_pull(skb, sizeof(*qmap));
+
+		rmnet_set_skb_proto(skb);
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+			if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+
+		pskb_trim(skb, len);
+
+		/* Push the single packet onto the list */
+		__skb_queue_tail(&list, skb);
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port, NULL, skb_peek(&list));
+
+	rmnet_deliver_skb_list(&list, port);
+	return;
+
+free_skb:
+	kfree_skb(skb);
+}
+
+int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
+			     struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_deag_entry);
+
+static void
+rmnet_map_ingress_handler(struct sk_buff *skb,
+			  struct rmnet_port *port)
+{
+	struct sk_buff *skbn;
+	int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
+					   struct rmnet_port *port);
+
+	if (skb->dev->type == ARPHRD_ETHER) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, ETH_HLEN);
+	}
+
+	if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				 RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5)) {
+		if (skb_is_nonlinear(skb)) {
+			rmnet_frag_ingress_handler(skb, port);
+			return;
+		}
+	}
+
+	/* No aggregation. Pass the frame on as is */
+	if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
+		__rmnet_map_ingress_handler(skb, port);
+		return;
+	}
+
+	if (skb->priority == 0xda1a)
+		goto no_perf;
+
+	/* Pass off handling to rmnet_perf module, if present */
+	rcu_read_lock();
+	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
+	if (rmnet_perf_core_deaggregate) {
+		rmnet_perf_core_deaggregate(skb, port);
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+
+no_perf:
+	/* Deaggregation and freeing of HW-originated
+	 * buffers are handled here
+	 */
+	while (skb) {
+		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
+
+		skb_shinfo(skb)->frag_list = NULL;
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
+			__rmnet_map_ingress_handler(skbn, port);
+
+			if (skbn == skb)
+				goto next_skb;
+		}
+
+		consume_skb(skb);
+next_skb:
+		skb = skb_frag;
+	}
+}
+
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+				    struct rmnet_port *port, u8 mux_id,
+				    struct net_device *orig_dev,
+				    bool low_latency)
+{
+	int required_headroom, additional_header_len, csum_type, tso = 0;
+	struct rmnet_map_header *map_header;
+	struct rmnet_aggregation_state *state;
+
+	additional_header_len = 0;
+	required_headroom = sizeof(struct rmnet_map_header);
+	csum_type = 0;
+
+	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
+		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+	} else if ((port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5) ||
+		   (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)) {
+		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+		csum_type = RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5;
+	}
+
+	required_headroom += additional_header_len;
+
+	if (skb_headroom(skb) < required_headroom) {
+		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
+			return -ENOMEM;
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port, NULL, NULL);
+
+	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
+				 RMNET_DEFAULT_AGG_STATE];
+
+	if (csum_type &&
+	    (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) &&
+	     skb_shinfo(skb)->gso_size) {
+		spin_lock_bh(&state->agg_lock);
+		rmnet_map_send_agg_skb(state);
+
+		if (rmnet_map_add_tso_header(skb, port, orig_dev))
+			return -EINVAL;
+		csum_type = 0;
+		tso = 1;
+	}
+
+	if (csum_type)
+		rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+						 csum_type);
+
+	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
+					      port);
+	if (!map_header)
+		return -ENOMEM;
+
+	map_header->mux_id = mux_id;
+
+	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+		if (state->params.agg_count < 2 ||
+		    rmnet_map_tx_agg_skip(skb, required_headroom) || tso)
+			goto done;
+
+		rmnet_map_tx_aggregate(skb, port, low_latency);
+		return -EINPROGRESS;
+	}
+
+done:
+	skb->protocol = htons(ETH_P_MAP);
+	return 0;
+}
+
+static void
+rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+{
+	if (bridge_dev) {
+		skb->dev = bridge_dev;
+		dev_queue_xmit(skb);
+	}
+}
+
+/* Ingress / Egress Entry Points */
+
+/* Processes packet as per ingress data format for receiving device. Logical
+ * endpoint is determined from packet inspection. Packet is then sent to the
+ * egress device listed in the logical endpoint configuration.
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct rmnet_port *port;
+	struct net_device *dev;
+	struct rmnet_skb_cb *cb;
+	int (*rmnet_core_shs_switch)(struct sk_buff *skb,
+				     struct rmnet_shs_clnt_s *cfg);
+
+	if (!skb)
+		goto done;
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
+			0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+	dev = skb->dev;
+	port = rmnet_get_port(dev);
+	if (unlikely(!port)) {
+#if (KERNEL_VERSION(6, 0, 0) < LINUX_VERSION_CODE)
+		dev_core_stats_rx_nohandler_inc(skb->dev);
+#else
+		atomic_long_inc(&skb->dev->rx_nohandler);
+#endif
+		kfree_skb(skb);
+		goto done;
+	}
+
+	switch (port->rmnet_mode) {
+	case RMNET_EPMODE_VND:
+
+		rcu_read_lock();
+		rmnet_core_shs_switch = rcu_dereference(rmnet_shs_switch);
+		cb = RMNET_SKB_CB(skb);
+		if (rmnet_core_shs_switch && !cb->qmap_steer &&
+		    skb->priority != 0xda1a) {
+			cb->qmap_steer = 1;
+			rmnet_core_shs_switch(skb, &port->phy_shs_cfg);
+			rcu_read_unlock();
+			return RX_HANDLER_CONSUMED;
+		}
+		rcu_read_unlock();
+
+		rmnet_map_ingress_handler(skb, port);
+		break;
+	case RMNET_EPMODE_BRIDGE:
+		rmnet_bridge_handler(skb, port->bridge_ep);
+		break;
+	}
+
+done:
+	return RX_HANDLER_CONSUMED;
+}
+EXPORT_SYMBOL(rmnet_rx_handler);
+
+rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	rx_handler_result_t rc = RX_HANDLER_PASS;
+
+	rmnet_module_hook_wlan_ingress_rx_handler(&rc, pskb);
+	if (rc != RX_HANDLER_PASS)
+		return rc;
+
+	rmnet_module_hook_perf_ingress_rx_handler(skb);
+
+	return RX_HANDLER_PASS;
+}
+
+/* Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb, bool low_latency)
+{
+	struct net_device *orig_dev;
+	struct rmnet_port *port;
+	struct rmnet_priv *priv;
+	u8 mux_id;
+	int err;
+	u32 skb_len;
+
+	trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
+			0xDEF, (void *)skb, NULL);
+	sk_pacing_shift_update(skb->sk, 8);
+
+	orig_dev = skb->dev;
+	priv = netdev_priv(orig_dev);
+	skb->dev = priv->real_dev;
+	mux_id = priv->mux_id;
+
+	port = rmnet_get_port(skb->dev);
+	if (!port)
+		goto drop;
+
+	skb_len = skb->len;
+	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev,
+				       low_latency);
+	if (err == -ENOMEM || err == -EINVAL) {
+		goto drop;
+	} else if (err == -EINPROGRESS) {
+		rmnet_vnd_tx_fixup(orig_dev, skb_len);
+		return;
+	}
+
+	rmnet_vnd_tx_fixup(orig_dev, skb_len);
+
+	if (low_latency) {
+		if (rmnet_ll_send_skb(skb)) {
+			/* Drop but no need to free. Above API handles that */
+			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+			return;
+		}
+	} else {
+		dev_queue_xmit(skb);
+	}
+
+	return;
+
+drop:
+	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+	kfree_skb(skb);
+}
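
The RCU-protected function pointers exported above (rmnet_shs_skb_entry, rmnet_shs_switch, rmnet_perf_deag_entry, and friends) are the attach points for external modules such as rmnet_shs and rmnet_perf. A minimal sketch of how such a module would hook in; example_shs_rx() and the init/exit pair are hypothetical:

static int example_shs_rx(struct sk_buff *skb, struct rmnet_shs_clnt_s *cfg)
{
	/* Steer or queue the skb here; hand it to the stack when done */
	netif_receive_skb(skb);
	return 0;
}

static int __init example_shs_init(void)
{
	rcu_assign_pointer(rmnet_shs_skb_entry, example_shs_rx);
	return 0;
}

static void __exit example_shs_exit(void)
{
	rcu_assign_pointer(rmnet_shs_skb_entry, NULL);
	synchronize_rcu();	/* wait for in-flight rmnet_deliver_skb() readers */
}

module_init(example_shs_init);
module_exit(example_shs_exit);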

+ 37 - 0
qcom/opensource/datarmnet/core/rmnet_handlers.h

@@ -0,0 +1,37 @@
+/* Copyright (c) 2013, 2016-2017, 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#ifndef _RMNET_HANDLERS_H_
+#define _RMNET_HANDLERS_H_
+
+#include "rmnet_config.h"
+
+enum rmnet_packet_context {
+	RMNET_NET_RX_CTX,
+	RMNET_WQ_CTX,
+};
+
+void rmnet_egress_handler(struct sk_buff *skb, bool low_latency);
+void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+			  enum rmnet_packet_context ctx);
+void rmnet_set_skb_proto(struct sk_buff *skb);
+bool rmnet_slow_start_on(u32 hash_key);
+rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
+					       struct rmnet_port *port);
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb);
+#endif /* _RMNET_HANDLERS_H_ */
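
rmnet_rx_handler() declared above matches the kernel's rx_handler_func_t signature; in this driver the actual registration happens in rmnet_config.c. A minimal sketch of that attachment, with the device and port arguments as placeholders:

static int example_attach_rx_handler(struct net_device *real_dev,
				     struct rmnet_port *port)
{
	int rc;

	/* netdev_rx_handler_register() must be called under RTNL */
	rtnl_lock();
	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
	rtnl_unlock();

	return rc;
}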

+ 159 - 0
qcom/opensource/datarmnet/core/rmnet_hook.h

@@ -0,0 +1,159 @@
+/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__RMNET_HOOKS__) || defined(__RMNET_HOOK_MULTIREAD__)
+#define __RMNET_HOOKS__
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include "rmnet_descriptor.h"
+
+RMNET_MODULE_HOOK(offload_ingress,
+	RMNET_MODULE_HOOK_NUM(OFFLOAD_INGRESS),
+	RMNET_MODULE_HOOK_PROTOCOL(struct list_head *desc_list,
+				   struct rmnet_port *port),
+	RMNET_MODULE_HOOK_ARGS(desc_list, port),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(offload_chain_end,
+	RMNET_MODULE_HOOK_NUM(OFFLOAD_CHAIN_END),
+	RMNET_MODULE_HOOK_PROTOCOL(void),
+	RMNET_MODULE_HOOK_ARGS(),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(shs_skb_entry,
+	RMNET_MODULE_HOOK_NUM(SHS_SKB_ENTRY),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
+				   struct rmnet_shs_clnt_s *cfg),
+	RMNET_MODULE_HOOK_ARGS(skb, cfg),
+	RMNET_MODULE_HOOK_RETURN_TYPE(int)
+);
+
+RMNET_MODULE_HOOK(shs_skb_ll_entry,
+	RMNET_MODULE_HOOK_NUM(SHS_SKB_LL_ENTRY),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
+				   struct rmnet_shs_clnt_s *cfg),
+	RMNET_MODULE_HOOK_ARGS(skb, cfg),
+	RMNET_MODULE_HOOK_RETURN_TYPE(int)
+);
+
+RMNET_MODULE_HOOK(shs_switch,
+	RMNET_MODULE_HOOK_NUM(SHS_SWITCH),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb,
+				   struct rmnet_shs_clnt_s *cfg),
+	RMNET_MODULE_HOOK_ARGS(skb, cfg),
+	RMNET_MODULE_HOOK_RETURN_TYPE(int)
+);
+
+RMNET_MODULE_HOOK(perf_tether_ingress,
+	RMNET_MODULE_HOOK_NUM(PERF_TETHER_INGRESS),
+	RMNET_MODULE_HOOK_PROTOCOL(struct tcphdr *tp,
+				   struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(tp, skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(perf_tether_egress,
+	RMNET_MODULE_HOOK_NUM(PERF_TETHER_EGRESS),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(perf_tether_cmd,
+	RMNET_MODULE_HOOK_NUM(PERF_TETHER_CMD),
+	RMNET_MODULE_HOOK_PROTOCOL(u8 message, u64 val),
+	RMNET_MODULE_HOOK_ARGS(message, val),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(perf_ingress,
+	RMNET_MODULE_HOOK_NUM(PERF_INGRESS),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(int)
+);
+
+RMNET_MODULE_HOOK(perf_egress,
+	RMNET_MODULE_HOOK_NUM(PERF_EGRESS),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(perf_set_thresh,
+	RMNET_MODULE_HOOK_NUM(PERF_SET_THRESH),
+	RMNET_MODULE_HOOK_PROTOCOL(u32 hash, u32 thresh),
+	RMNET_MODULE_HOOK_ARGS(hash, thresh),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(aps_pre_queue,
+	RMNET_MODULE_HOOK_NUM(APS_PRE_QUEUE),
+	RMNET_MODULE_HOOK_PROTOCOL(struct net_device *dev, struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(dev, skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(aps_post_queue,
+	RMNET_MODULE_HOOK_NUM(APS_POST_QUEUE),
+	RMNET_MODULE_HOOK_PROTOCOL(struct net_device *dev, struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(dev, skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(int)
+);
+
+RMNET_MODULE_HOOK(wlan_flow_match,
+	RMNET_MODULE_HOOK_NUM(WLAN_FLOW_MATCH),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(aps_data_inactive,
+	RMNET_MODULE_HOOK_NUM(APS_DATA_INACTIVE),
+	RMNET_MODULE_HOOK_PROTOCOL(void),
+	RMNET_MODULE_HOOK_ARGS(),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(aps_data_active,
+	RMNET_MODULE_HOOK_NUM(APS_DATA_ACTIVE),
+	RMNET_MODULE_HOOK_PROTOCOL(struct rmnet_frag_descriptor *frag_desc,
+				   struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(frag_desc, skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(aps_data_report,
+	RMNET_MODULE_HOOK_NUM(APS_DATA_REPORT),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(perf_ingress_rx_handler,
+	RMNET_MODULE_HOOK_NUM(PERF_INGRESS_RX_HANDLER),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff *skb),
+	RMNET_MODULE_HOOK_ARGS(skb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(void)
+);
+
+RMNET_MODULE_HOOK(wlan_ingress_rx_handler,
+	RMNET_MODULE_HOOK_NUM(WLAN_INGRESS_RX_HANDLER),
+	RMNET_MODULE_HOOK_PROTOCOL(struct sk_buff **pskb),
+	RMNET_MODULE_HOOK_ARGS(pskb),
+	RMNET_MODULE_HOOK_RETURN_TYPE(rx_handler_result_t)
+);
+
+#endif
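
This header deliberately lacks a conventional include guard: rmnet_module.h (not shown here) is expected to define the RMNET_MODULE_HOOK* macros differently and re-include this file (hence __RMNET_HOOK_MULTIREAD__), so that a single hook list generates the enums, declarations, and rmnet_module_hook_*() callers used elsewhere in the driver. A generic illustration of that multi-read pattern, with purely hypothetical macro bodies that differ from the real expansion:

/* Hypothetical consumer: emit one extern function-pointer declaration
 * per hook in the list above.
 */
#define RMNET_MODULE_HOOK_NUM(n)		(n)
#define RMNET_MODULE_HOOK_PROTOCOL(...)		__VA_ARGS__
#define RMNET_MODULE_HOOK_ARGS(...)		__VA_ARGS__
#define RMNET_MODULE_HOOK_RETURN_TYPE(t)	t
#define RMNET_MODULE_HOOK(name, num, proto, args, ret) \
	extern ret (*example_hook_##name)(proto);

#include "rmnet_hook.h"

#undef RMNET_MODULE_HOOK

/* The offload_ingress entry, for instance, expands to:
 * extern void (*example_hook_offload_ingress)(struct list_head *desc_list,
 *					        struct rmnet_port *port);
 */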

+ 173 - 0
qcom/opensource/datarmnet/core/rmnet_ll.c

@@ -0,0 +1,173 @@
+/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RmNet Low Latency channel handlers
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include "rmnet_ll.h"
+#include "rmnet_ll_core.h"
+
+#define RMNET_LL_MAX_RECYCLE_ITER 16
+
+static struct rmnet_ll_stats rmnet_ll_stats;
+/* For TX sync with DMA operations */
+DEFINE_SPINLOCK(rmnet_ll_tx_lock);
+
+/* Client operations for respective underlying HW */
+extern struct rmnet_ll_client_ops rmnet_ll_client;
+
+static void rmnet_ll_buffers_submit(struct rmnet_ll_endpoint *ll_ep,
+				    struct list_head *buf_list)
+{
+	struct rmnet_ll_buffer *ll_buf;
+
+	list_for_each_entry(ll_buf, buf_list, list) {
+		if (ll_buf->submitted)
+			continue;
+
+		if (!rmnet_ll_client.buffer_queue ||
+		    rmnet_ll_client.buffer_queue(ll_ep, ll_buf)) {
+			rmnet_ll_stats.rx_queue_err++;
+			/* Don't leak the page if we're not storing it */
+			if (ll_buf->temp_alloc)
+				put_page(ll_buf->page);
+		} else {
+			ll_buf->submitted = true;
+			rmnet_ll_stats.rx_queue++;
+		}
+	}
+}
+
+static struct rmnet_ll_buffer *
+rmnet_ll_buffer_alloc(struct rmnet_ll_endpoint *ll_ep, gfp_t gfp)
+{
+	struct rmnet_ll_buffer *ll_buf;
+	struct page *page;
+	void *addr;
+
+	page = __dev_alloc_pages(gfp, ll_ep->page_order);
+	if (!page)
+		return NULL;
+
+	/* Store the buffer information at the end */
+	addr = page_address(page);
+	ll_buf = addr + ll_ep->buf_len;
+	ll_buf->page = page;
+	ll_buf->submitted = false;
+	INIT_LIST_HEAD(&ll_buf->list);
+	return ll_buf;
+}
+
+int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
+{
+	spin_lock_init(&ll_ep->buf_pool.pool_lock);
+	INIT_LIST_HEAD(&ll_ep->buf_pool.buf_list);
+	ll_ep->buf_pool.last = ll_ep->buf_pool.buf_list.next;
+	ll_ep->buf_pool.pool_size = 0;
+	return 0;
+}
+
+void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
+{
+	struct rmnet_ll_buffer *ll_buf, *tmp;
+	list_for_each_entry_safe(ll_buf, tmp, &ll_ep->buf_pool.buf_list, list) {
+		list_del(&ll_buf->list);
+		put_page(ll_buf->page);
+	}
+
+	ll_ep->buf_pool.last = NULL;
+}
+
+void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
+{
+	struct rmnet_ll_buffer *ll_buf, *tmp;
+	LIST_HEAD(buf_list);
+	int num_tre, count = 0, iter = 0;
+
+	if (!rmnet_ll_client.query_free_descriptors)
+		goto out;
+
+	num_tre = rmnet_ll_client.query_free_descriptors(ll_ep);
+	if (!num_tre)
+		goto out;
+
+	list_for_each_entry_safe(ll_buf, tmp, ll_ep->buf_pool.last, list) {
+		if (++iter > RMNET_LL_MAX_RECYCLE_ITER || count == num_tre)
+			break;
+
+		if (ll_buf->submitted)
+			continue;
+
+		count++;
+		list_move_tail(&ll_buf->list, &buf_list);
+	}
+
+	/* Mark where we left off */
+	ll_ep->buf_pool.last = &ll_buf->list;
+	/* Submit any pool buffers to the HW if we found some */
+	if (count) {
+		rmnet_ll_buffers_submit(ll_ep, &buf_list);
+		/* requeue immediately BEFORE the last checked element */
+		list_splice_tail_init(&buf_list, ll_ep->buf_pool.last);
+	}
+
+	/* Do any temporary allocations needed to fill the rest */
+	for (; count < num_tre; count++) {
+		ll_buf = rmnet_ll_buffer_alloc(ll_ep, GFP_ATOMIC);
+		if (!ll_buf)
+			break;
+
+		list_add_tail(&ll_buf->list, &buf_list);
+		ll_buf->temp_alloc = true;
+		rmnet_ll_stats.rx_tmp_allocs++;
+	}
+
+	if (!list_empty(&buf_list))
+		rmnet_ll_buffers_submit(ll_ep, &buf_list);
+
+out:
+	return;
+}
+
+int rmnet_ll_send_skb(struct sk_buff *skb)
+{
+	int rc;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+	rc = rmnet_ll_client.tx(skb);
+	spin_unlock_bh(&rmnet_ll_tx_lock);
+	if (rc)
+		rmnet_ll_stats.tx_queue_err++;
+	else
+		rmnet_ll_stats.tx_queue++;
+
+	return rc;
+}
+
+struct rmnet_ll_stats *rmnet_ll_get_stats(void)
+{
+	return &rmnet_ll_stats;
+}
+
+int rmnet_ll_init(void)
+{
+	return rmnet_ll_client.init();
+}
+
+void rmnet_ll_exit(void)
+{
+	rmnet_ll_client.exit();
+}

+ 45 - 0
qcom/opensource/datarmnet/core/rmnet_ll.h

@@ -0,0 +1,45 @@
+/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RmNet Low Latency channel handlers
+ */
+
+#ifndef __RMNET_LL_H__
+#define __RMNET_LL_H__
+
+#include <linux/skbuff.h>
+
+struct rmnet_ll_stats {
+		u64 tx_queue;
+		u64 tx_queue_err;
+		u64 tx_complete;
+		u64 tx_complete_err;
+		u64 rx_queue;
+		u64 rx_queue_err;
+		u64 rx_status_err;
+		u64 rx_null;
+		u64 rx_oom;
+		u64 rx_pkts;
+		u64 rx_tmp_allocs;
+		u64 tx_disabled;
+		u64 tx_enabled;
+		u64 tx_fc_queued;
+		u64 tx_fc_sent;
+		u64 tx_fc_err;
+};
+
+int rmnet_ll_send_skb(struct sk_buff *skb);
+struct rmnet_ll_stats *rmnet_ll_get_stats(void);
+int rmnet_ll_init(void);
+void rmnet_ll_exit(void);
+
+#endif

+ 69 - 0
qcom/opensource/datarmnet/core/rmnet_ll_core.h

@@ -0,0 +1,69 @@
+/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RmNet Low Latency channel handlers
+ */
+
+#ifndef __RMNET_LL_CORE_H__
+#define __RMNET_LL_CORE_H__
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+
+#define RMNET_LL_DEFAULT_MRU 0x8000
+
+struct rmnet_ll_buffer {
+	struct list_head list;
+	struct page *page;
+	bool temp_alloc;
+	bool submitted;
+};
+
+struct rmnet_ll_buffer_pool {
+	struct list_head buf_list;
+	/* Protect access to the recycle buffer pool */
+	spinlock_t pool_lock;
+	struct list_head *last;
+	u32 pool_size;
+};
+
+struct rmnet_ll_endpoint {
+	struct rmnet_ll_buffer_pool buf_pool;
+	struct net_device *phys_dev;
+	void *priv;
+	u32 dev_mru;
+	u32 page_order;
+	u32 buf_len;
+};
+
+/* Core operations to hide differences between physical transports.
+ *
+ * buffer_queue: Queue an allocated buffer to the HW for RX. Optional.
+ * query_free_descriptors: Return number of free RX descriptors. Optional.
+ * tx: Send an SKB over the channel in the TX direction.
+ * init: Initialization callback on module load
+ * exit: Exit callback on module unload
+ */
+struct rmnet_ll_client_ops {
+	int (*buffer_queue)(struct rmnet_ll_endpoint *ll_ep,
+			    struct rmnet_ll_buffer *ll_buf);
+	int (*query_free_descriptors)(struct rmnet_ll_endpoint *ll_ep);
+	int (*tx)(struct sk_buff *skb);
+	int (*init)(void);
+	int (*exit)(void);
+};
+
+int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep);
+void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep);
+void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep);
+
+#endif

+ 222 - 0
qcom/opensource/datarmnet/core/rmnet_ll_ipa.c

@@ -0,0 +1,222 @@
+/* Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RmNet IPA Low Latency channel handlers
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#if !defined(__arch_um__)
+	#include <linux/ipa.h>
+#endif /* !defined(__arch_um__) */
+#include <linux/if_ether.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include "rmnet_ll.h"
+#include "rmnet_ll_core.h"
+
+#define IPA_RMNET_LL_RECEIVE 1
+#define IPA_RMNET_LL_FLOW_EVT 2
+
+#define MAX_Q_LEN 1000
+
+#if !defined(__arch_um__)
+static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
+static struct sk_buff_head tx_pending_list;
+extern spinlock_t rmnet_ll_tx_lock;
+
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t);
+DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending);
+static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t)
+{
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct sk_buff *skb;
+	int rc;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+
+	while ((skb = __skb_dequeue(&tx_pending_list))) {
+		rc = ipa_rmnet_ll_xmit(skb);
+		if (rc == -EAGAIN) {
+			stats->tx_disabled++;
+			__skb_queue_head(&tx_pending_list, skb);
+			break;
+		}
+		if (rc >= 0)
+			stats->tx_fc_sent++;
+		else
+			stats->tx_fc_err++;
+	}
+
+	spin_unlock_bh(&rmnet_ll_tx_lock);
+}
+
+static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
+{
+	struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ipa_ep;
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct sk_buff *skb, *tmp;
+
+	if (arg == (void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT)) {
+		stats->tx_enabled++;
+		tasklet_schedule(&tx_pending_task);
+		return;
+	}
+
+	if (unlikely(arg != (void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE))) {
+		pr_err("%s: invalid arg %u\n", __func__, (uintptr_t)arg);
+		return;
+	}
+
+	skb = rx_data;
+	/* Odds are IPA does this, but just to be safe */
+	skb->dev = ll_ep->phys_dev;
+	skb->protocol = htons(ETH_P_MAP);
+	skb_record_rx_queue(skb, 1);
+
+	tmp = skb;
+	while (tmp) {
+		/* Mark the SKB as low latency */
+		tmp->priority = 0xda1a;
+		if (tmp == skb)
+			tmp = skb_shinfo(tmp)->frag_list;
+		else
+			tmp = tmp->next;
+	}
+
+	stats->rx_pkts++;
+	netif_rx(skb);
+}
+
+static void rmnet_ll_ipa_probe(void *arg)
+{
+	struct rmnet_ll_endpoint *ll_ep;
+
+	ll_ep = kzalloc(sizeof(*ll_ep), GFP_KERNEL);
+	if (!ll_ep) {
+		pr_err("%s(): allocating LL CTX failed\n", __func__);
+		return;
+	}
+
+	ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_ipa0");
+	if (!ll_ep->phys_dev) {
+		pr_err("%s(): Invalid physical device\n", __func__);
+		kfree(ll_ep);
+		return;
+	}
+
+	*((struct rmnet_ll_endpoint **)arg) = ll_ep;
+}
+
+static void rmnet_ll_ipa_remove(void *arg)
+{
+	struct rmnet_ll_endpoint **ll_ep = arg;
+	struct sk_buff *skb;
+
+	dev_put((*ll_ep)->phys_dev);
+	kfree(*ll_ep);
+	*ll_ep = NULL;
+
+	spin_lock_bh(&rmnet_ll_tx_lock);
+	while ((skb = __skb_dequeue(&tx_pending_list)))
+		kfree_skb(skb);
+	spin_unlock_bh(&rmnet_ll_tx_lock);
+}
+
+static void rmnet_ll_ipa_ready(void * __unused)
+{
+	int rc;
+
+	rc = ipa_register_rmnet_ll_cb(rmnet_ll_ipa_probe,
+				      (void *)&rmnet_ll_ipa_ep,
+				      rmnet_ll_ipa_remove,
+				      (void *)&rmnet_ll_ipa_ep,
+				      rmnet_ll_ipa_rx,
+				      (void *)&rmnet_ll_ipa_ep);
+	if (rc)
+		pr_err("%s(): Registering IPA LL callback failed with rc %d\n",
+		       __func__, rc);
+}
+
+static int rmnet_ll_ipa_tx(struct sk_buff *skb)
+{
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	int rc;
+
+	if (!rmnet_ll_ipa_ep)
+		return -ENODEV;
+
+	if (!skb_queue_empty(&tx_pending_list))
+		goto queue_skb;
+
+	rc = ipa_rmnet_ll_xmit(skb);
+
+	/* rc >= 0: success; rc is the number of free descriptors left */
+	if (rc >= 0)
+		return 0;
+
+	/* IPA handles freeing the SKB on failure */
+	if (rc != -EAGAIN)
+		return rc;
+
+	stats->tx_disabled++;
+
+queue_skb:
+	/* Flow controlled */
+	if (skb_queue_len(&tx_pending_list) >= MAX_Q_LEN) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_queue_tail(&tx_pending_list, skb);
+	stats->tx_fc_queued++;
+
+	return 0;
+}
+
+static int rmnet_ll_ipa_init(void)
+{
+	int rc;
+
+	__skb_queue_head_init(&tx_pending_list);
+	rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
+	if (rc == -EEXIST) {
+		/* IPA is already up; call the ready handler ourselves */
+		rmnet_ll_ipa_ready(NULL);
+		rc = 0;
+	}
+
+	return rc;
+}
+
+static int rmnet_ll_ipa_exit(void)
+{
+	if (rmnet_ll_ipa_ep) {
+		ipa_unregister_rmnet_ll_cb();
+		/* Teardown? */
+		rmnet_ll_ipa_ep = NULL;
+	}
+
+	return 0;
+}
+#else
+static int rmnet_ll_ipa_tx(struct sk_buff *skb){return 0;};
+static int rmnet_ll_ipa_init(void){return 0;}
+static int rmnet_ll_ipa_exit(void){return 0;};
+#endif /* !defined(__arch_um__) */
+
+/* Export operations struct to the main framework */
+struct rmnet_ll_client_ops rmnet_ll_client = {
+	.tx = rmnet_ll_ipa_tx,
+	.init = rmnet_ll_ipa_init,
+	.exit = rmnet_ll_ipa_exit,
+};

+ 239 - 0
qcom/opensource/datarmnet/core/rmnet_ll_mhi.c

@@ -0,0 +1,239 @@
+/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RmNet MHI Low Latency channel handlers
+ */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/mhi.h>
+#include <linux/if_ether.h>
+#include <linux/mm.h>
+#include "rmnet_ll.h"
+#include "rmnet_ll_core.h"
+
+static struct rmnet_ll_endpoint *rmnet_ll_mhi_ep;
+
+static void rmnet_ll_mhi_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
+{
+	struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct rmnet_ll_buffer *ll_buf;
+	struct sk_buff *skb;
+
+	/* Get the buffer struct back for our page information */
+	ll_buf = res->buf_addr + ll_ep->buf_len;
+	ll_buf->submitted = false;
+	if (res->transaction_status) {
+		stats->rx_status_err++;
+		goto err;
+	} else if (!res->bytes_xferd) {
+		stats->rx_null++;
+		goto err;
+	}
+
+	/* Store this away so we don't have to look it up every time */
+	if (!ll_ep->phys_dev) {
+		ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_mhi0");
+		if (!ll_ep->phys_dev)
+			goto err;
+	}
+
+	skb = alloc_skb(0, GFP_ATOMIC);
+	if (!skb) {
+		stats->rx_oom++;
+		goto err;
+	}
+
+	/* Build the SKB and pass it off to the stack */
+	skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
+			ll_ep->buf_len);
+	if (!ll_buf->temp_alloc)
+		get_page(ll_buf->page);
+
+	skb->dev = ll_ep->phys_dev;
+	skb->protocol = htons(ETH_P_MAP);
+	/* Mark this as arriving on the LL channel. Allows rmnet to skip
+	 * module handling as needed.
+	 */
+	skb->priority = 0xda1a;
+	stats->rx_pkts++;
+	netif_rx(skb);
+	rmnet_ll_buffers_recycle(ll_ep);
+	return;
+
+err:
+	/* Go, and never darken my towels again! */
+	if (ll_buf->temp_alloc)
+		put_page(ll_buf->page);
+}
+
+static void rmnet_ll_mhi_tx_complete(struct mhi_device *mhi_dev,
+				     struct mhi_result *res)
+{
+	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
+	struct sk_buff *skb = res->buf_addr;
+
+	/* Check the result and free the SKB */
+	if (res->transaction_status)
+		stats->tx_complete_err++;
+	else
+		stats->tx_complete++;
+
+	dev_consume_skb_any(skb);
+}
+
+static int rmnet_ll_mhi_probe(struct mhi_device *mhi_dev,
+			      const struct mhi_device_id *id)
+{
+	struct rmnet_ll_endpoint *ll_ep;
+	int rc;
+
+	/* Allocate space for our state from the managed pool tied to the life
+	 * of the mhi device.
+	 */
+	ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
+	if (!ll_ep)
+		return -ENOMEM;
+
+	/* Hold on to the mhi_dev so we can send data to it later */
+	ll_ep->priv = (void *)mhi_dev;
+
+	/* Grab the MRU of the device so we know the size of the pages we need
+	 * to allocate for the pool.
+	 */
+	rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
+				  &ll_ep->dev_mru);
+	if (rc || !ll_ep->dev_mru)
+		/* Use our default mru */
+		ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;
+
+	ll_ep->page_order = get_order(ll_ep->dev_mru);
+	/* We store some stuff at the end of the page, so don't let the HW
+	 * use that part of it.
+	 */
+	ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);
+
+	/* Tell MHI to initialize the UL/DL channels for transfer */
+	rc = mhi_prepare_for_transfer(mhi_dev);
+	if (rc) {
+		pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
+		       __func__, rc);
+		return rc;
+	}
+
+	rc = rmnet_ll_buffer_pool_alloc(ll_ep);
+	if (rc) {
+		pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
+		       rc);
+		mhi_unprepare_from_transfer(mhi_dev);
+		return rc;
+	}
+
+	rmnet_ll_buffers_recycle(ll_ep);
+
+	/* Not a fan of storing this pointer in two locations, but I've yet to
+	 * come up with any other good way of accessing it on the TX path from
+	 * rmnet otherwise, since we won't have any references to the mhi_dev.
+	 */
+	dev_set_drvdata(&mhi_dev->dev, ll_ep);
+	rmnet_ll_mhi_ep = ll_ep;
+	return 0;
+}
+
+static void rmnet_ll_mhi_remove(struct mhi_device *mhi_dev)
+{
+	struct rmnet_ll_endpoint *ll_ep;
+
+	ll_ep = dev_get_drvdata(&mhi_dev->dev);
+	/* Remove our private data from the device. No need to free it though;
+	 * it will be freed once the mhi_dev is released, since it was
+	 * allocated from a managed pool.
+	 */
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+	rmnet_ll_mhi_ep = NULL;
+	rmnet_ll_buffer_pool_free(ll_ep);
+}
+
+static const struct mhi_device_id rmnet_ll_mhi_channel_table[] = {
+	{
+		.chan = "RMNET_DATA_LL",
+	},
+	{},
+};
+
+static struct mhi_driver rmnet_ll_driver = {
+	.probe = rmnet_ll_mhi_probe,
+	.remove = rmnet_ll_mhi_remove,
+	.dl_xfer_cb = rmnet_ll_mhi_rx,
+	.ul_xfer_cb = rmnet_ll_mhi_tx_complete,
+	.id_table = rmnet_ll_mhi_channel_table,
+	.driver = {
+		.name = "rmnet_ll",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int rmnet_ll_mhi_queue(struct rmnet_ll_endpoint *ll_ep,
+			      struct rmnet_ll_buffer *ll_buf)
+{
+	struct mhi_device *mhi_dev = ll_ep->priv;
+
+	return mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE,
+			     page_address(ll_buf->page),
+			     ll_ep->buf_len, MHI_EOT);
+}
+
+static int rmnet_ll_mhi_query_free_descriptors(struct rmnet_ll_endpoint *ll_ep)
+{
+	struct mhi_device *mhi_dev = ll_ep->priv;
+
+	return mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+}
+
+static int rmnet_ll_mhi_tx(struct sk_buff *skb)
+{
+	struct mhi_device *mhi_dev;
+	int rc;
+
+	if (!rmnet_ll_mhi_ep)
+		return -ENODEV;
+
+	mhi_dev = rmnet_ll_mhi_ep->priv;
+	rc = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+	if (rc)
+		kfree_skb(skb);
+
+	return rc;
+}
+
+static int rmnet_ll_mhi_init(void)
+{
+	return mhi_driver_register(&rmnet_ll_driver);
+}
+
+static int rmnet_ll_mhi_exit(void)
+{
+	mhi_driver_unregister(&rmnet_ll_driver);
+	return 0;
+}
+
+/* Export operations struct to the main framework */
+struct rmnet_ll_client_ops rmnet_ll_client = {
+	.buffer_queue = rmnet_ll_mhi_queue,
+	.query_free_descriptors = rmnet_ll_mhi_query_free_descriptors,
+	.tx = rmnet_ll_mhi_tx,
+	.init = rmnet_ll_mhi_init,
+	.exit = rmnet_ll_mhi_exit,
+};

+ 542 - 0
qcom/opensource/datarmnet/core/rmnet_ll_qmap.c

@@ -0,0 +1,542 @@
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/netlink.h>
+#include <uapi/linux/rtnetlink.h>
+#include <linux/net.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+#include "dfc.h"
+#include "rmnet_qmi.h"
+#include "rmnet_qmap.h"
+#include "qmi_rmnet_i.h"
+
+#define QMAP_LL_VER		1
+#define QMAP_LL_MAX_BEARER	15
+
+#define QMAP_SWITCH_TO_LL	1
+#define QMAP_SWITCH_TO_DEFAULT	2
+#define QMAP_SWITCH_QUERY	3
+
+/* Switch status from modem */
+#define SWITCH_STATUS_ERROR	0
+#define SWITCH_STATUS_SUCCESS	1
+#define SWITCH_STATUS_DEFAULT	2
+#define SWITCH_STATUS_LL	3
+#define SWITCH_STATUS_FAIL_TEMP	4
+#define SWITCH_STATUS_FAIL_PERM	5
+
+/* Internal switch status */
+#define SWITCH_STATUS_NONE	0xFF
+#define SWITCH_STATUS_TIMEOUT	0xFE
+#define SWITCH_STATUS_NO_EFFECT	0xFD
+
+#define LL_MASK_NL_ACK 1
+#define LL_MASK_AUTO_RETRY 2
+
+#define LL_TIMEOUT (5 * HZ)
+#define LL_RETRY_TIME (10 * HZ)
+#define LL_MAX_RETRY (3)
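+
+/* Channel switch life cycle, as implemented by the code below:
+ *
+ *	rmnet_ll_switch()	sends QMAP_LL_SWITCH, state = CH_SWITCH_STARTED,
+ *				guard timer armed for LL_TIMEOUT
+ *	switch response (ACK)	success moves the state to CH_SWITCH_ACKED;
+ *				other statuses complete the switch immediately
+ *	switch status ind	completes the switch and sends the netlink ack
+ *	temporary failure	state = CH_SWITCH_FAILED_RETRY and the guard
+ *				timer re-sends after LL_RETRY_TIME, at most
+ *				LL_MAX_RETRY times when LL_MASK_AUTO_RETRY is set
+ */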
+
+struct qmap_ll_bearer {
+	u8			bearer_id;
+	u8			status;
+	u8			reserved[2];
+}  __aligned(1);
+
+struct qmap_ll_switch {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved;
+	u8			request_type;
+	u8			num_bearers;
+	struct qmap_ll_bearer	bearer[0];
+} __aligned(1);
+
+struct qmap_ll_switch_resp {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved[2];
+	u8			num_bearers;
+	struct qmap_ll_bearer	bearer[0];
+} __aligned(1);
+
+struct qmap_ll_switch_status {
+	struct qmap_cmd_hdr	hdr;
+	u8			cmd_ver;
+	u8			reserved[2];
+	u8			num_bearers;
+	struct qmap_ll_bearer	bearer[0];
+} __aligned(1);
+
+/*
+ * LL workqueue
+ */
+static DEFINE_SPINLOCK(ll_wq_lock);
+static struct workqueue_struct *ll_wq;
+
+struct ll_ack_work {
+	struct work_struct work;
+	u32 nl_pid;
+	u32 nl_seq;
+	u8 bearer_id;
+	u8 status_code;
+	u8 current_ch;
+};
+
+static void ll_ack_fn(struct work_struct *work)
+{
+	struct ll_ack_work *ack_work;
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct nlmsgerr *errmsg;
+	unsigned int flags = NLM_F_CAPPED;
+
+	ack_work = container_of(work, struct ll_ack_work, work);
+
+	skb = nlmsg_new(sizeof(*errmsg), GFP_KERNEL);
+	if (!skb)
+		goto out;
+
+	nlh = __nlmsg_put(skb, ack_work->nl_pid,
+			  ack_work->nl_seq, NLMSG_ERROR,
+			  sizeof(*errmsg), flags);
+	errmsg = nlmsg_data(nlh);
+	errmsg->error = 0;
+	errmsg->msg.nlmsg_len = sizeof(struct nlmsghdr);
+	errmsg->msg.nlmsg_type = ack_work->bearer_id;
+	errmsg->msg.nlmsg_flags = ack_work->status_code;
+	errmsg->msg.nlmsg_seq = ack_work->current_ch;
+	errmsg->msg.nlmsg_pid = ack_work->nl_pid;
+	nlmsg_end(skb, nlh);
+
+	rtnl_unicast(skb, &init_net, ack_work->nl_pid);
+out:
+	kfree(ack_work);
+}
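+
+/* Reader's note on the ack built above: the NLMSG_ERROR payload is repurposed,
+ * so a (hypothetical) listener would decode it as
+ *
+ *	errmsg->msg.nlmsg_type  -> bearer ID
+ *	errmsg->msg.nlmsg_flags -> switch status code (SWITCH_STATUS_*)
+ *	errmsg->msg.nlmsg_seq   -> channel currently in use
+ *
+ * rather than as an ordinary netlink error report.
+ */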
+
+static void ll_send_nl_ack(struct rmnet_bearer_map *bearer)
+{
+	struct ll_ack_work *ack_work;
+
+	if (!(bearer->ch_switch.flags & LL_MASK_NL_ACK))
+		return;
+
+	ack_work = kzalloc(sizeof(*ack_work), GFP_ATOMIC);
+	if (!ack_work)
+		return;
+
+	ack_work->nl_pid = bearer->ch_switch.nl_pid;
+	ack_work->nl_seq = bearer->ch_switch.nl_seq;
+	ack_work->bearer_id = bearer->bearer_id;
+	ack_work->status_code = bearer->ch_switch.status_code;
+	ack_work->current_ch = bearer->ch_switch.current_ch;
+	INIT_WORK(&ack_work->work, ll_ack_fn);
+
+	spin_lock_bh(&ll_wq_lock);
+	if (ll_wq)
+		queue_work(ll_wq, &ack_work->work);
+	else
+		kfree(ack_work);
+	spin_unlock_bh(&ll_wq_lock);
+}
+
+void rmnet_ll_wq_init(void)
+{
+	WARN_ON(ll_wq);
+	ll_wq = alloc_ordered_workqueue("rmnet_ll_wq", 0);
+}
+
+void rmnet_ll_wq_exit(void)
+{
+	struct workqueue_struct *tmp = NULL;
+
+	spin_lock_bh(&ll_wq_lock);
+	if (ll_wq) {
+		tmp = ll_wq;
+		ll_wq = NULL;
+	}
+	spin_unlock_bh(&ll_wq_lock);
+
+	if (tmp)
+		destroy_workqueue(tmp);
+}
+
+/*
+ * LLC switch
+ */
+
+static void ll_qmap_maybe_set_ch(struct qos_info *qos,
+				 struct rmnet_bearer_map *bearer, u8 status)
+{
+	u8 ch;
+
+	if (status == SWITCH_STATUS_DEFAULT)
+		ch = RMNET_CH_DEFAULT;
+	else if (status == SWITCH_STATUS_LL)
+		ch = RMNET_CH_LL;
+	else
+		return;
+
+	bearer->ch_switch.current_ch = ch;
+	if (bearer->mq_idx < MAX_MQ_NUM)
+		qos->mq[bearer->mq_idx].is_ll_ch = ch;
+}
+
+static void ll_switch_complete(struct rmnet_bearer_map *bearer, u8 status)
+{
+	bearer->ch_switch.status_code = status;
+
+	if (status == SWITCH_STATUS_FAIL_TEMP &&
+	    bearer->ch_switch.retry_left) {
+		/* Temp failure retry */
+		bearer->ch_switch.state = CH_SWITCH_FAILED_RETRY;
+		mod_timer(&bearer->ch_switch.guard_timer,
+			  jiffies + LL_RETRY_TIME);
+		bearer->ch_switch.retry_left--;
+	} else {
+		/* Success or permanent failure */
+		bearer->ch_switch.timer_quit = true;
+		del_timer(&bearer->ch_switch.guard_timer);
+		bearer->ch_switch.state = CH_SWITCH_NONE;
+		bearer->ch_switch.retry_left = 0;
+		ll_send_nl_ack(bearer);
+		bearer->ch_switch.flags = 0;
+	}
+}
+
+static int ll_qmap_handle_switch_resp(struct sk_buff *skb)
+{
+	struct qmap_ll_switch_resp *cmd;
+	struct rmnet_bearer_map *bearer;
+	struct qos_info *qos;
+	struct net_device *dev;
+	int i;
+
+	if (skb->len < sizeof(struct qmap_ll_switch_resp))
+		return QMAP_CMD_DONE;
+
+	cmd = (struct qmap_ll_switch_resp *)skb->data;
+	if (!cmd->num_bearers)
+		return QMAP_CMD_DONE;
+
+	if (skb->len < sizeof(*cmd) +
+		       cmd->num_bearers * sizeof(struct qmap_ll_bearer))
+		return QMAP_CMD_DONE;
+
+	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
+	if (!dev)
+		return QMAP_CMD_DONE;
+
+	qos = rmnet_get_qos_pt(dev);
+	if (!qos)
+		return QMAP_CMD_DONE;
+
+	trace_dfc_ll_switch("ACK", 0, cmd->num_bearers, cmd->bearer);
+
+	spin_lock_bh(&qos->qos_lock);
+
+	for (i = 0; i < cmd->num_bearers; i++) {
+		bearer = qmi_rmnet_get_bearer_map(qos,
+						  cmd->bearer[i].bearer_id);
+		if (!bearer)
+			continue;
+
+		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);
+
+		if (bearer->ch_switch.state == CH_SWITCH_STARTED &&
+		    bearer->ch_switch.switch_txid == cmd->hdr.tx_id) {
+			/* This is an ACK to the switch request */
+			if (cmd->bearer[i].status == SWITCH_STATUS_SUCCESS)
+				bearer->ch_switch.state = CH_SWITCH_ACKED;
+			else
+				ll_switch_complete(bearer,
+						   cmd->bearer[i].status);
+		}
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return QMAP_CMD_DONE;
+}
+
+static int ll_qmap_handle_switch_status(struct sk_buff *skb)
+{
+	struct qmap_ll_switch_status *cmd;
+	struct rmnet_bearer_map *bearer;
+	struct qos_info *qos;
+	struct net_device *dev;
+	int i;
+
+	if (skb->len < sizeof(struct qmap_ll_switch_status))
+		return QMAP_CMD_INVALID;
+
+	cmd = (struct qmap_ll_switch_status *)skb->data;
+	if (!cmd->num_bearers)
+		return QMAP_CMD_ACK;
+
+	if (skb->len < sizeof(*cmd) +
+		       cmd->num_bearers * sizeof(struct qmap_ll_bearer))
+		return QMAP_CMD_INVALID;
+
+	dev = rmnet_qmap_get_dev(cmd->hdr.mux_id);
+	if (!dev)
+		return QMAP_CMD_ACK;
+
+	qos = rmnet_get_qos_pt(dev);
+	if (!qos)
+		return QMAP_CMD_ACK;
+
+	trace_dfc_ll_switch("STS", 0, cmd->num_bearers, cmd->bearer);
+
+	spin_lock_bh(&qos->qos_lock);
+
+	for (i = 0; i < cmd->num_bearers; i++) {
+		bearer = qmi_rmnet_get_bearer_map(qos,
+						  cmd->bearer[i].bearer_id);
+		if (!bearer)
+			continue;
+
+		ll_qmap_maybe_set_ch(qos, bearer, cmd->bearer[i].status);
+
+		if (bearer->ch_switch.state == CH_SWITCH_ACKED)
+			ll_switch_complete(bearer, cmd->bearer[i].status);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+
+	return QMAP_CMD_ACK;
+}
+
+int ll_qmap_cmd_handler(struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+	int rc = QMAP_CMD_DONE;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+
+	if (cmd->cmd_name == QMAP_LL_SWITCH) {
+		if (cmd->cmd_type != QMAP_CMD_ACK)
+			return rc;
+	} else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
+		return rc;
+	}
+
+	switch (cmd->cmd_name) {
+	case QMAP_LL_SWITCH:
+		rc = ll_qmap_handle_switch_resp(skb);
+		break;
+
+	case QMAP_LL_SWITCH_STATUS:
+		rc = ll_qmap_handle_switch_status(skb);
+		break;
+
+	default:
+		if (cmd->cmd_type == QMAP_CMD_REQUEST)
+			rc = QMAP_CMD_UNSUPPORTED;
+	}
+
+	return rc;
+}
+
+static int ll_qmap_send_switch(u8 mux_id, u8 channel, u8 num_bearers,
+			       u8 *bearer_list, __be32 *txid)
+{
+	struct sk_buff *skb;
+	struct qmap_ll_switch *ll_switch;
+	unsigned int len;
+	int i;
+
+	if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER || !bearer_list)
+		return -EINVAL;
+
+	len  = sizeof(struct qmap_ll_switch) +
+			num_bearers * sizeof(struct qmap_ll_bearer);
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb->protocol = htons(ETH_P_MAP);
+	ll_switch = skb_put(skb, len);
+	memset(ll_switch, 0, len);
+
+	ll_switch->hdr.cd_bit = 1;
+	ll_switch->hdr.mux_id = mux_id;
+	ll_switch->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+	ll_switch->hdr.cmd_name = QMAP_LL_SWITCH;
+	ll_switch->hdr.cmd_type = QMAP_CMD_REQUEST;
+	ll_switch->hdr.tx_id = htonl(rmnet_qmap_next_txid());
+
+	ll_switch->cmd_ver = QMAP_LL_VER;
+	if (channel == RMNET_CH_CTL)
+		ll_switch->request_type = QMAP_SWITCH_QUERY;
+	else if (channel == RMNET_CH_LL)
+		ll_switch->request_type = QMAP_SWITCH_TO_LL;
+	else
+		ll_switch->request_type = QMAP_SWITCH_TO_DEFAULT;
+	ll_switch->num_bearers = num_bearers;
+	for (i = 0; i < num_bearers; i++)
+		ll_switch->bearer[i].bearer_id = bearer_list[i];
+
+	if (txid)
+		*txid = ll_switch->hdr.tx_id;
+
+	trace_dfc_ll_switch("REQ", ll_switch->request_type,
+			    ll_switch->num_bearers, ll_switch->bearer);
+
+	return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
+}
+
+/*
+ * Start channel switch. The switch request is sent only if all bearers
+ * are eligible to switch. Return 0 if switch request is sent.
+ */
+int rmnet_ll_switch(struct net_device *dev, struct tcmsg *tcm, int attrlen)
+{
+	u8 switch_to_ch;
+	u8 num_bearers;
+	u8 *bearer_list;
+	u32 flags;
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	__be32 txid;
+	int i;
+	int j;
+	int rc = -EINVAL;
+
+	if (!dev || !tcm)
+		return -EINVAL;
+	/*
+	 * tcm__pad1: switch type (ch #, 0xFF query)
+	 * tcm__pad2: num bearers
+	 * tcm_info: flags
+	 * tcm_ifindex: netlink fd
+	 * tcm_handle: pid
+	 * tcm_parent: seq
+	 */
+
+	switch_to_ch = tcm->tcm__pad1;
+	num_bearers = tcm->tcm__pad2;
+	flags = tcm->tcm_info;
+
+	if (switch_to_ch != RMNET_CH_CTL && switch_to_ch >= RMNET_CH_MAX)
+		return -EOPNOTSUPP;
+	if (!num_bearers || num_bearers > QMAP_LL_MAX_BEARER)
+		return -EINVAL;
+	if (attrlen - sizeof(*tcm) < num_bearers)
+		return -EINVAL;
+
+	bearer_list = (u8 *)tcm + sizeof(*tcm);
+
+	for (i = 0; i < num_bearers; i++)
+		for (j = 0; j < num_bearers; j++)
+			if (j != i && bearer_list[i] == bearer_list[j])
+				return -EINVAL;
+
+	qos = rmnet_get_qos_pt(dev);
+	if (!qos)
+		return -EINVAL;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	/* Validate the bearer list */
+	for (i = 0; i < num_bearers; i++) {
+		bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
+		if (!bearer) {
+			rc = -EFAULT;
+			goto out;
+		}
+		if (bearer->ch_switch.state != CH_SWITCH_NONE) {
+			rc = -EBUSY;
+			goto out;
+		}
+	}
+
+	/* Send QMAP switch command */
+	rc = ll_qmap_send_switch(qos->mux_id, switch_to_ch,
+				 num_bearers, bearer_list, &txid);
+	if (rc)
+		goto out;
+
+	/* Update state */
+	for (i = 0; i < num_bearers; i++) {
+		bearer = qmi_rmnet_get_bearer_map(qos, bearer_list[i]);
+		if (!bearer)
+			continue;
+		bearer->ch_switch.switch_to_ch = switch_to_ch;
+		bearer->ch_switch.switch_txid = txid;
+		bearer->ch_switch.state = CH_SWITCH_STARTED;
+		bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
+		bearer->ch_switch.retry_left =
+			(flags & LL_MASK_AUTO_RETRY) ? LL_MAX_RETRY : 0;
+		bearer->ch_switch.flags = flags;
+		bearer->ch_switch.timer_quit = false;
+		mod_timer(&bearer->ch_switch.guard_timer,
+			  jiffies + LL_TIMEOUT);
+
+		bearer->ch_switch.nl_pid = tcm->tcm_handle;
+		bearer->ch_switch.nl_seq = tcm->tcm_parent;
+	}
+out:
+	spin_unlock_bh(&qos->qos_lock);
+	return rc;
+}
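+
+/* Illustrative trigger for rmnet_ll_switch() (hypothetical values; the field
+ * mapping is exactly the one documented at the top of the function, and
+ * tcm_handle/tcm_parent would carry the netlink pid/seq used for the ack):
+ *
+ *	struct {
+ *		struct tcmsg tcm;
+ *		u8 bearers[2];
+ *	} req = { };
+ *
+ *	req.tcm.tcm__pad1 = RMNET_CH_LL;	switch to the LL channel
+ *	req.tcm.tcm__pad2 = 2;			number of bearers that follow
+ *	req.tcm.tcm_info  = LL_MASK_NL_ACK;	ask for a netlink ack
+ *	req.bearers[0] = 5;
+ *	req.bearers[1] = 6;
+ *	rc = rmnet_ll_switch(dev, &req.tcm, sizeof(req));
+ */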
+
+void rmnet_ll_guard_fn(struct timer_list *t)
+{
+	struct rmnet_ch_switch *ch_switch;
+	struct rmnet_bearer_map *bearer;
+	int switch_status = SWITCH_STATUS_TIMEOUT;
+	__be32 txid;
+	int rc;
+
+	ch_switch = container_of(t, struct rmnet_ch_switch, guard_timer);
+	bearer = container_of(ch_switch, struct rmnet_bearer_map, ch_switch);
+
+	spin_lock_bh(&bearer->qos->qos_lock);
+
+	if (bearer->ch_switch.timer_quit ||
+	    bearer->ch_switch.state == CH_SWITCH_NONE)
+		goto out;
+
+	if (bearer->ch_switch.state == CH_SWITCH_FAILED_RETRY) {
+		if (bearer->ch_switch.current_ch ==
+		    bearer->ch_switch.switch_to_ch) {
+			switch_status = SWITCH_STATUS_NO_EFFECT;
+			goto send_err;
+		}
+
+		rc = ll_qmap_send_switch(bearer->qos->mux_id,
+					 bearer->ch_switch.switch_to_ch,
+					 1,
+					 &bearer->bearer_id,
+					 &txid);
+		if (!rc) {
+			bearer->ch_switch.switch_txid = txid;
+			bearer->ch_switch.state = CH_SWITCH_STARTED;
+			bearer->ch_switch.status_code = SWITCH_STATUS_NONE;
+			goto out;
+		}
+	}
+
+send_err:
+	bearer->ch_switch.state = CH_SWITCH_NONE;
+	bearer->ch_switch.status_code = switch_status;
+	bearer->ch_switch.retry_left = 0;
+	ll_send_nl_ack(bearer);
+	bearer->ch_switch.flags = 0;
+
+out:
+	spin_unlock_bh(&bearer->qos->qos_lock);
+}

+ 331 - 0
qcom/opensource/datarmnet/core/rmnet_map.h

@@ -0,0 +1,331 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+
+#include <linux/skbuff.h>
+#include "rmnet_config.h"
+
+struct rmnet_map_control_command {
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+	union {
+		struct {
+			u16 ip_family:2;
+			u16 reserved:14;
+			__be16 flow_control_seq_num;
+			__be32 qos_id;
+		} flow_control;
+		u8 data[0];
+	};
+}  __aligned(1);
+
+enum rmnet_map_commands {
+	RMNET_MAP_COMMAND_NONE,
+	RMNET_MAP_COMMAND_FLOW_DISABLE,
+	RMNET_MAP_COMMAND_FLOW_ENABLE,
+	RMNET_MAP_COMMAND_FLOW_START = 7,
+	RMNET_MAP_COMMAND_FLOW_END = 8,
+	RMNET_MAP_COMMAND_PB_BYTES = 35,
+	/* These should always be the last 2 elements */
+	RMNET_MAP_COMMAND_UNKNOWN,
+	RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+enum rmnet_map_v5_header_type {
+	RMNET_MAP_HEADER_TYPE_UNKNOWN,
+	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
+	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
+	RMNET_MAP_HEADER_TYPE_TSO = 0x3,
+	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
+};
+
+enum rmnet_map_v5_close_type {
+	RMNET_MAP_COAL_CLOSE_NON_COAL,
+	RMNET_MAP_COAL_CLOSE_IP_MISS,
+	RMNET_MAP_COAL_CLOSE_TRANS_MISS,
+	RMNET_MAP_COAL_CLOSE_HW,
+	RMNET_MAP_COAL_CLOSE_COAL,
+};
+
+enum rmnet_map_v5_close_value {
+	RMNET_MAP_COAL_CLOSE_HW_NL,
+	RMNET_MAP_COAL_CLOSE_HW_PKT,
+	RMNET_MAP_COAL_CLOSE_HW_BYTE,
+	RMNET_MAP_COAL_CLOSE_HW_TIME,
+	RMNET_MAP_COAL_CLOSE_HW_EVICT,
+};
+
+/* Main QMAP header */
+struct rmnet_map_header {
+	u8  pad_len:6;
+	u8  next_hdr:1;
+	u8  cd_bit:1;
+	u8  mux_id;
+	__be16 pkt_len;
+}  __aligned(1);
+
+/* QMAP v5 headers */
+struct rmnet_map_v5_csum_header {
+	u8  next_hdr:1;
+	u8  header_type:7;
+	u8  hw_reserved:4;
+	u8  aps_prio:1;
+	u8  priority:1;
+	u8  hw_reserved_bit:1;
+	u8  csum_valid_required:1;
+	__be16 reserved;
+} __aligned(1);
+
+struct rmnet_map_v5_nl_pair {
+	__be16 pkt_len;
+	u8  csum_error_bitmap;
+	u8  num_packets;
+} __aligned(1);
+
+/* NLO: Number-length object */
+#define RMNET_MAP_V5_MAX_NLOS         (6)
+#define RMNET_MAP_V5_MAX_PACKETS      (48)
+
+struct rmnet_map_v5_coal_header {
+	u8  next_hdr:1;
+	u8  header_type:7;
+	u8  reserved1:4;
+	u8  num_nlos:3;
+	u8  csum_valid:1;
+	u8  close_type:4;
+	u8  close_value:4;
+	u8  reserved2:4;
+	u8  virtual_channel_id:4;
+
+	struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
+} __aligned(1);
+
+struct rmnet_map_v5_tso_header {
+	u8  next_hdr:1;
+	u8  header_type:7;
+	u8  hw_reserved:5;
+	u8  priority:1;
+	u8  zero_csum:1;
+	u8  ip_id_cfg:1;
+	__be16 segment_size;
+} __aligned(1);
+
+/* QMAP v4 headers */
+struct rmnet_map_dl_csum_trailer {
+	u8  reserved1;
+	u8  valid:1;
+	u8  reserved2:7;
+	u16 csum_start_offset;
+	u16 csum_length;
+	__be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+	__be16 csum_start_offset;
+	u16 csum_insert_offset:14;
+	u16 udp_ind:1;
+	u16 csum_enabled:1;
+} __aligned(1);
+
+struct rmnet_map_control_command_header {
+	u8 command_name;
+	u8 cmd_type:2;
+	u8 reserved:5;
+	u8 e:1;
+	u16 source_id:15;
+	u16 ext:1;
+	u32 transaction_id;
+}  __aligned(1);
+
+struct rmnet_map_flow_info_le {
+	__be32 mux_id;
+	__be32 flow_id;
+	__be32 bytes;
+	__be32 pkts;
+} __aligned(1);
+
+struct rmnet_map_flow_info_be {
+	u32 mux_id;
+	u32 flow_id;
+	u32 bytes;
+	u32 pkts;
+} __aligned(1);
+
+struct rmnet_map_pb_ind_hdr {
+	union {
+		struct {
+			u32 seq_num;
+			u32 start_end_seq_num;
+			u32 row_bytes_pending;
+			u32 fc_bytes_pending;
+		} le __aligned(1);
+		struct {
+			u32 seq_num;
+			u32 start_end_seq_num;
+			u32 row_bytes_pending;
+			u32 fc_bytes_pending;
+		} be __aligned(1);
+	} __aligned(1);
+} __aligned(1);
+
+struct rmnet_map_pb_ind {
+	u8 priority;
+	void (*pb_ind_handler)(struct rmnet_map_pb_ind_hdr *pbhdr);
+	struct list_head list;
+};
+
+struct rmnet_map_dl_ind_hdr {
+	union {
+		struct {
+			u32 seq;
+			u32 bytes;
+			u32 pkts;
+			u32 flows;
+			struct rmnet_map_flow_info_le flow[0];
+		} le __aligned(1);
+		struct {
+			__be32 seq;
+			__be32 bytes;
+			__be32 pkts;
+			__be32 flows;
+			struct rmnet_map_flow_info_be flow[0];
+		} be __aligned(1);
+	} __aligned(1);
+} __aligned(1);
+
+struct rmnet_map_dl_ind_trl {
+	union {
+		__be32 seq_be;
+		u32 seq_le;
+	} __aligned(1);
+} __aligned(1);
+
+struct rmnet_map_dl_ind {
+	u8 priority;
+	void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *dlhdr,
+				  struct rmnet_map_control_command_header *qcmd);
+	void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *dltrl,
+				  struct rmnet_map_control_command_header *qcmd);
+	struct list_head list;
+};
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
+				 (Y)->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
+				(Y)->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
+				(Y)->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
+				    ((Y)->data + \
+				      sizeof(struct rmnet_map_header)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
+					(Y)->data)->pkt_len))
+
+#define RMNET_MAP_DEAGGR_SPACING  64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
+#define RMNET_MAP_COMMAND_REQUEST     0
+#define RMNET_MAP_COMMAND_ACK         1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID     3
+
+#define RMNET_MAP_NO_PAD_BYTES        0
+#define RMNET_MAP_ADD_PAD_BYTES       1
+
+static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
+{
+	/* Nonlinear packets we receive are entirely within frag 0 */
+	if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
+		return skb_frag_address(skb_shinfo(skb)->frags);
+
+	return skb->data;
+}
+
+static inline struct rmnet_map_control_command *
+rmnet_map_get_cmd_start(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return (struct rmnet_map_control_command *)data;
+}
+
+static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_coal_header *)data)->header_type;
+}
+
+static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
+}
+
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port);
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+						  int hdrlen, int pad,
+						  struct rmnet_port *port);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct rmnet_port *port,
+				      struct net_device *orig_dev,
+				      int csum_type);
+bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+				      struct sk_buff_head *list,
+				      u16 len);
+int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
+void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+			    bool low_latency);
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
+				    u16 size, u8 count, u8 features, u32 time);
+void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+				struct rmnet_map_dl_ind_hdr *dl_hdr,
+				struct rmnet_map_control_command_header *qcmd);
+void rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
+				struct rmnet_map_dl_ind_trl *dltrl,
+				struct rmnet_map_control_command_header *qcmd);
+void rmnet_map_pb_ind_notify(struct rmnet_port *port,
+			     struct rmnet_map_pb_ind_hdr *pbhdr);
+int rmnet_map_flow_command(struct sk_buff *skb,
+			   struct rmnet_port *port,
+			   bool rmnet_perf);
+void rmnet_map_cmd_init(struct rmnet_port *port);
+int rmnet_map_dl_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_dl_ind *dl_ind);
+int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_dl_ind *dl_ind);
+int rmnet_map_pb_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_pb_ind *pb_ind);
+int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_pb_ind *pb_ind);
+void rmnet_map_cmd_exit(struct rmnet_port *port);
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
+void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state);
+int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
+			      struct net_device *orig_dev);
+#endif /* _RMNET_MAP_H_ */

+ 465 - 0
qcom/opensource/datarmnet/core/rmnet_map_command.c

@@ -0,0 +1,465 @@
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+
+#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
+			    sizeof(struct rmnet_map_control_command_header))
+
+#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+#define RMNET_PB_IND_HDR_SIZE (sizeof(struct rmnet_map_pb_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
+				    struct rmnet_port *port,
+				    int enable)
+{
+	struct rmnet_map_header *qmap;
+	struct rmnet_map_control_command *cmd;
+	struct rmnet_endpoint *ep;
+	struct net_device *vnd;
+	u16 ip_family;
+	u16 fc_seq;
+	u32 qos_id;
+	u8 mux_id;
+	int r;
+
+	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+	mux_id = qmap->mux_id;
+	cmd = rmnet_map_get_cmd_start(skb);
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep) {
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	vnd = ep->egress_dev;
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	/* Ignore the IP family and pass the sequence number for both the v4
+	 * and v6 sequences. User space does not support creating dedicated
+	 * flows for the two protocols.
+	 */
+	r = rmnet_vnd_do_flow_control(vnd, enable);
+	if (r) {
+		kfree_skb(skb);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	} else {
+		return RMNET_MAP_COMMAND_ACK;
+	}
+}
+
+static void rmnet_map_send_ack(struct sk_buff *skb,
+			       unsigned char type,
+			       struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	struct net_device *dev = skb->dev;
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		pskb_trim(skb,
+			  skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
+
+	skb->protocol = htons(ETH_P_MAP);
+
+	cmd = rmnet_map_get_cmd_start(skb);
+	cmd->cmd_type = type & 0x03;
+
+	netif_tx_lock(dev);
+	dev->netdev_ops->ndo_start_xmit(skb, dev);
+	netif_tx_unlock(dev);
+}
+
+void
+rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
+			   struct rmnet_map_dl_ind_hdr *dlhdr,
+			   struct rmnet_map_control_command_header *qcmd)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_hdr_handler_v2(dlhdr, qcmd);
+}
+
+void
+rmnet_map_dl_trl_notify_v2(struct rmnet_port *port,
+			   struct rmnet_map_dl_ind_trl *dltrl,
+			   struct rmnet_map_control_command_header *qcmd)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_trl_handler_v2(dltrl, qcmd);
+}
+
+void rmnet_map_pb_ind_notify(struct rmnet_port *port,
+			     struct rmnet_map_pb_ind_hdr *pbhdr)
+{
+	struct rmnet_map_pb_ind *tmp;
+
+	list_for_each_entry(tmp, &port->pb_list, list) {
+		port->stats.pb_marker_seq = pbhdr->le.seq_num;
+		tmp->pb_ind_handler(pbhdr);
+	}
+}
+
+static void rmnet_map_process_pb_ind(struct sk_buff *skb,
+				     struct rmnet_port *port,
+				     bool rmnet_perf)
+{
+	struct rmnet_map_pb_ind_hdr *pbhdr;
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (skb->len < RMNET_PB_IND_HDR_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	if (is_dl_mark_v2) {
+		pskb_pull(skb, sizeof(struct rmnet_map_header) +
+				  sizeof(struct rmnet_map_control_command_header));
+	}
+
+	pbhdr = (struct rmnet_map_pb_ind_hdr *)rmnet_map_data_ptr(skb);
+	port->stats.pb_marker_count++;
+
+	if (is_dl_mark_v2)
+		rmnet_map_pb_ind_notify(port, pbhdr);
+
+	if (rmnet_perf) {
+		unsigned int pull_size;
+
+		pull_size = sizeof(struct rmnet_map_pb_ind_hdr);
+		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
+		pskb_pull(skb, pull_size);
+	}
+}
+
+static void rmnet_map_process_flow_start(struct sk_buff *skb,
+					 struct rmnet_port *port,
+					 bool rmnet_perf)
+{
+	struct rmnet_map_dl_ind_hdr *dlhdr;
+	struct rmnet_map_control_command_header *qcmd;
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (skb->len < RMNET_DL_IND_HDR_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	if (is_dl_mark_v2) {
+		pskb_pull(skb, sizeof(struct rmnet_map_header));
+		qcmd = (struct rmnet_map_control_command_header *)
+			rmnet_map_data_ptr(skb);
+		port->stats.dl_hdr_last_ep_id = qcmd->source_id;
+		port->stats.dl_hdr_last_qmap_vers = qcmd->reserved;
+		port->stats.dl_hdr_last_trans_id = qcmd->transaction_id;
+		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
+	}
+
+	dlhdr = (struct rmnet_map_dl_ind_hdr *)rmnet_map_data_ptr(skb);
+
+	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
+	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
+	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
+	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
+	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
+	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
+	port->stats.dl_hdr_count++;
+
+	if (is_dl_mark_v2)
+		rmnet_map_dl_hdr_notify_v2(port, dlhdr, qcmd);
+
+	if (rmnet_perf) {
+		unsigned int pull_size;
+
+		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
+		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
+		pskb_pull(skb, pull_size);
+	}
+}
+
+static void rmnet_map_process_flow_end(struct sk_buff *skb,
+				       struct rmnet_port *port,
+				       bool rmnet_perf)
+{
+	struct rmnet_map_dl_ind_trl *dltrl;
+	struct rmnet_map_control_command_header *qcmd;
+	u32 data_format;
+	bool is_dl_mark_v2;
+
+	if (skb->len < RMNET_DL_IND_TRL_SIZE)
+		return;
+
+	data_format = port->data_format;
+	is_dl_mark_v2 = data_format & RMNET_INGRESS_FORMAT_DL_MARKER_V2;
+	if (is_dl_mark_v2) {
+		pskb_pull(skb, sizeof(struct rmnet_map_header));
+		qcmd = (struct rmnet_map_control_command_header *)
+			rmnet_map_data_ptr(skb);
+		pskb_pull(skb, sizeof(struct rmnet_map_control_command_header));
+	}
+
+	dltrl = (struct rmnet_map_dl_ind_trl *)rmnet_map_data_ptr(skb);
+
+	port->stats.dl_trl_last_seq = dltrl->seq_le;
+	port->stats.dl_trl_count++;
+
+	if (is_dl_mark_v2)
+		rmnet_map_dl_trl_notify_v2(port, dltrl, qcmd);
+
+	if (rmnet_perf) {
+		unsigned int pull_size;
+
+		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
+		if (data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
+		pskb_pull(skb, pull_size);
+	}
+}
+
+/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ */
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
+{
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	cmd = rmnet_map_get_cmd_start(skb);
+	command_name = cmd->command_name;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(skb, port, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(skb, port, 0);
+		break;
+
+	default:
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		kfree_skb(skb);
+		break;
+	}
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_map_send_ack(skb, rc, port);
+}
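+
+/* Example of the exchange handled above (names from rmnet_map.h): a MAP
+ * control frame with cd_bit = 1 and command_name =
+ * RMNET_MAP_COMMAND_FLOW_DISABLE is routed to rmnet_map_do_flow_control(),
+ * and on success the same frame is echoed back to the modem with cmd_type
+ * rewritten to RMNET_MAP_COMMAND_ACK by rmnet_map_send_ack().
+ */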
+
+int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
+			   bool rmnet_perf)
+{
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+
+	cmd = rmnet_map_get_cmd_start(skb);
+	command_name = cmd->command_name;
+
+	/* Silently discard any markers on the LL channel */
+	if (skb->priority == 0xda1a &&
+	    (command_name == RMNET_MAP_COMMAND_FLOW_START ||
+	     command_name == RMNET_MAP_COMMAND_FLOW_END)) {
+		if (!rmnet_perf)
+			consume_skb(skb);
+
+		return 0;
+	}
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_START:
+		rmnet_map_process_flow_start(skb, port, rmnet_perf);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_END:
+		rmnet_map_process_flow_end(skb, port, rmnet_perf);
+		break;
+
+	case RMNET_MAP_COMMAND_PB_BYTES:
+		rmnet_map_process_pb_ind(skb, port, rmnet_perf);
+		break;
+
+	default:
+		return 1;
+	}
+
+	/* The rmnet_perf module will consume the skb */
+	if (!rmnet_perf)
+		consume_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_flow_command);
+
+void rmnet_map_cmd_exit(struct rmnet_port *port)
+{
+	struct rmnet_map_dl_ind *tmp, *idx;
+
+	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
+		list_del_rcu(&tmp->list);
+
+	list_for_each_entry_safe(tmp, idx, &port->pb_list, list)
+		list_del_rcu(&tmp->list);
+}
+
+void rmnet_map_cmd_init(struct rmnet_port *port)
+{
+	INIT_LIST_HEAD(&port->dl_list);
+	INIT_LIST_HEAD(&port->pb_list);
+}
+
+int rmnet_map_dl_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_dl_ind *dl_ind)
+{
+	struct rmnet_map_dl_ind *dl_ind_iterator;
+	bool empty_ind_list = true;
+
+	if (!port || !dl_ind || !dl_ind->dl_hdr_handler_v2 ||
+	    !dl_ind->dl_trl_handler_v2)
+		return -EINVAL;
+
+	list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
+		empty_ind_list = false;
+		if (dl_ind_iterator->priority < dl_ind->priority) {
+			if (dl_ind_iterator->list.next) {
+				if (dl_ind->priority
+				    < list_entry_rcu(dl_ind_iterator->list.next,
+				    typeof(*dl_ind_iterator), list)->priority) {
+					list_add_rcu(&dl_ind->list,
+						     &dl_ind_iterator->list);
+					break;
+				}
+			} else {
+				list_add_rcu(&dl_ind->list,
+					     &dl_ind_iterator->list);
+				break;
+			}
+		} else {
+			list_add_tail_rcu(&dl_ind->list,
+					  &dl_ind_iterator->list);
+			break;
+		}
+	}
+
+	if (empty_ind_list)
+		list_add_rcu(&dl_ind->list, &port->dl_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_dl_ind_register);
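+
+/* Illustrative registration sketch (hypothetical client and callbacks; only
+ * the structure layout comes from rmnet_map.h):
+ *
+ *	static struct rmnet_map_dl_ind my_dl_ind = {
+ *		.priority = 1,
+ *		.dl_hdr_handler_v2 = my_dl_hdr_cb,
+ *		.dl_trl_handler_v2 = my_dl_trl_cb,
+ *	};
+ *
+ *	rmnet_map_dl_ind_register(port, &my_dl_ind);
+ *	...
+ *	rmnet_map_dl_ind_deregister(port, &my_dl_ind);
+ */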
+
+int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_dl_ind *dl_ind)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	if (!port || !dl_ind || !(port->dl_list.next))
+		return -EINVAL;
+
+	list_for_each_entry(tmp, &port->dl_list, list) {
+		if (tmp == dl_ind) {
+			list_del_rcu(&dl_ind->list);
+			goto done;
+		}
+	}
+
+done:
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);
+
+int rmnet_map_pb_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_pb_ind *pb_ind)
+{
+	struct rmnet_map_pb_ind *pb_ind_iterator;
+	bool empty_ind_list = true;
+
+	if (!port || !pb_ind || !pb_ind->pb_ind_handler)
+		return -EINVAL;
+
+	list_for_each_entry_rcu(pb_ind_iterator, &port->pb_list, list) {
+		empty_ind_list = false;
+		if (pb_ind_iterator->priority < pb_ind->priority) {
+			if (pb_ind_iterator->list.next) {
+				if (pb_ind->priority
+				    < list_entry_rcu(pb_ind_iterator->list.next,
+				    typeof(*pb_ind_iterator), list)->priority) {
+					list_add_rcu(&pb_ind->list,
+						     &pb_ind_iterator->list);
+					break;
+				}
+			} else {
+				list_add_rcu(&pb_ind->list,
+					     &pb_ind_iterator->list);
+				break;
+			}
+		} else {
+			list_add_tail_rcu(&pb_ind->list,
+					  &pb_ind_iterator->list);
+			break;
+		}
+	}
+
+	if (empty_ind_list)
+		list_add_rcu(&pb_ind->list, &port->pb_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_pb_ind_register);
+
+int rmnet_map_pb_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_pb_ind *pb_ind)
+{
+	struct rmnet_map_pb_ind *tmp;
+
+	if (!port || !pb_ind || !(port->pb_list.next))
+		return -EINVAL;
+
+	list_for_each_entry(tmp, &port->pb_list, list) {
+		if (tmp == pb_ind) {
+			list_del_rcu(&pb_ind->list);
+			goto done;
+		}
+	}
+
+done:
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_pb_ind_deregister);

+ 1720 - 0
qcom/opensource/datarmnet/core/rmnet_map_data.c

@@ -0,0 +1,1720 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data MAP protocol
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_handlers.h"
+#include "rmnet_ll.h"
+#include "rmnet_mem.h"
+
+#define RMNET_MAP_PKT_COPY_THRESHOLD 64
+#define RMNET_MAP_DEAGGR_SPACING  64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+#define RMNET_PAGE_COUNT 384
+
+struct rmnet_map_coal_metadata {
+	void *ip_header;
+	void *trans_header;
+	u16 ip_len;
+	u16 trans_len;
+	u16 data_offset;
+	u16 data_len;
+	u8 ip_proto;
+	u8 trans_proto;
+	u8 pkt_id;
+	u8 pkt_count;
+};
+
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)rmnet_map_data_ptr(skb);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+		priv->stats.csum_fragmented_pkt++;
+		return -EOPNOTSUPP;
+	}
+
+	txporthdr = rmnet_map_data_ptr(skb) + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
+		return -EPROTONOSUPPORT;
+	}
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+		priv->stats.csum_skipped++;
+		return 0;
+	}
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
+		return 0;
+	} else {
+		priv->stats.csum_validation_failed++;
+		return -EINVAL;
+	}
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer,
+			       struct rmnet_priv *priv)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr, *data = rmnet_map_data_ptr(skb);
+	u32 length;
+
+	ip6h = data;
+
+	txporthdr = data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field) {
+		priv->stats.csum_err_invalid_transport++;
+		return -EPROTONOSUPPORT;
+	}
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - data)));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			     length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
+		priv->stats.csum_ok++;
+		return 0;
+	} else {
+		priv->stats.csum_validation_failed++;
+		return -EINVAL;
+	}
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ind = 1;
+	else
+		ul_header->udp_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+
+	if (ip6h->nexthdr == IPPROTO_UDP)
+		ul_header->udp_ind = 1;
+	else
+		ul_header->udp_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
+/* Adds MAP header to front of skb->data
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ */
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+						  int hdrlen, int pad,
+						  struct rmnet_port *port)
+{
+	struct rmnet_map_header *map_header;
+	u32 padding, map_datalen;
+	u8 *padbytes;
+
+	map_datalen = skb->len - hdrlen;
+	map_header = (struct rmnet_map_header *)
+			skb_push(skb, sizeof(struct rmnet_map_header));
+	memset(map_header, 0, sizeof(struct rmnet_map_header));
+
+	/* Set next_hdr bit for csum offload packets */
+	if (port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5)
+		map_header->next_hdr = 1;
+
+	if (pad == RMNET_MAP_NO_PAD_BYTES) {
+		map_header->pkt_len = htons(map_datalen);
+		return map_header;
+	}
+
+	padding = ALIGN(map_datalen, 4) - map_datalen;
+
+	if (padding == 0)
+		goto done;
+
+	if (skb_tailroom(skb) < padding)
+		return NULL;
+
+	padbytes = (u8 *)skb_put(skb, padding);
+	memset(padbytes, 0, padding);
+
+done:
+	map_header->pkt_len = htons(map_datalen + padding);
+	map_header->pad_len = padding & 0x3F;
+
+	return map_header;
+}
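+
+/* Worked example of the padding math above: a 1499-byte payload gives
+ * padding = ALIGN(1499, 4) - 1499 = 1, so a single zero byte is appended and
+ * the header carries pkt_len = htons(1500) with pad_len = 1.
+ */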
+
+/* Deaggregates a single packet
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * Caller should keep calling deaggregate() on the source skb until NULL is
+ * returned, indicating that there are no more packets to deaggregate. Caller
+ * is responsible for freeing the original skb.
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
+{
+	struct rmnet_map_header *maph;
+	struct sk_buff *skbn;
+	unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
+	u32 packet_len;
+
+	if (skb->len == 0)
+		return NULL;
+
+	maph = (struct rmnet_map_header *)data;
+	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
+
+	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+	else if (port->data_format & RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5) {
+		if (!maph->cd_bit) {
+			packet_len += sizeof(struct rmnet_map_v5_csum_header);
+
+			/* Coalescing headers require MAPv5 */
+			next_hdr = data + sizeof(*maph);
+		}
+	}
+
+	if (((int)skb->len - (int)packet_len) < 0)
+		return NULL;
+
+	/* Some hardware can send us empty frames. Catch them */
+	if (ntohs(maph->pkt_len) == 0)
+		return NULL;
+
+	if (next_hdr &&
+	    ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
+	     RMNET_MAP_HEADER_TYPE_COALESCING)
+		return skb;
+
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag0 = skb_shinfo(skb)->frags;
+		struct page *page = skb_frag_page(frag0);
+
+		skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_append_pagefrags(skbn, page, frag0->bv_offset,
+				     packet_len);
+		skbn->data_len += packet_len;
+		skbn->len += packet_len;
+	} else {
+		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+				 GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+		skb_put(skbn, packet_len);
+		memcpy(skbn->data, data, packet_len);
+	}
+
+	skbn->priority = skb->priority;
+	pskb_pull(skb, packet_len);
+
+	return skbn;
+}
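+
+/* Hypothetical caller loop for the deaggregation routine above (the real
+ * consumer lives in the ingress handlers; deliver_one() is a placeholder):
+ *
+ *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+ *		deliver_one(skbn);
+ *	consume_skb(skb);
+ */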
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+		priv->stats.csum_sw++;
+		return -EOPNOTSUPP;
+	}
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)
+		       (rmnet_map_data_ptr(skb) + len);
+
+	if (!csum_trailer->valid) {
+		priv->stats.csum_valid_unset++;
+		return -EINVAL;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
+#else
+		priv->stats.csum_err_invalid_ip_version++;
+		return -EPROTONOSUPPORT;
+#endif
+	} else {
+		priv->stats.csum_err_invalid_ip_version++;
+		return -EPROTONOSUPPORT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);
+
+void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+					 struct net_device *orig_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			priv->stats.csum_hw++;
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			priv->stats.csum_hw++;
+			return;
+#else
+			priv->stats.csum_err_invalid_ip_version++;
+			goto sw_csum;
+#endif
+		} else {
+			priv->stats.csum_err_invalid_ip_version++;
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ind = 0;
+
+	priv->stats.csum_sw++;
+}
+
+static void rmnet_map_v5_check_priority(struct sk_buff *skb,
+					struct net_device *orig_dev,
+					struct rmnet_map_v5_csum_header *hdr,
+					bool tso)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+
+	if (RMNET_LLM(skb->priority)) {
+		priv->stats.ul_prio++;
+		hdr->priority = 1;
+	}
+
+	/* APS priority bit is only valid for csum header */
+	if (!tso && RMNET_APS_LLB(skb->priority)) {
+		priv->stats.aps_prio++;
+		hdr->aps_prio = 1;
+	}
+}
+
+void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+					 struct rmnet_port *port,
+					 struct net_device *orig_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+	struct rmnet_map_v5_csum_header *ul_header;
+
+	ul_header = (struct rmnet_map_v5_csum_header *)
+		    skb_push(skb, sizeof(*ul_header));
+	memset(ul_header, 0, sizeof(*ul_header));
+	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+
+	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
+		rmnet_map_v5_check_priority(skb, orig_dev, ul_header, false);
+
+	/* Allow priority w/o csum offload */
+	if (!(port->data_format & RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5))
+		return;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		void *iph = (char *)ul_header + sizeof(*ul_header);
+		void *trans;
+		__sum16 *check;
+		u8 proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+			proto = ((struct iphdr *)iph)->protocol;
+			trans = iph + ip_len;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			u16 ip_len = sizeof(struct ipv6hdr);
+
+			proto = ((struct ipv6hdr *)iph)->nexthdr;
+			trans = iph + ip_len;
+		} else {
+			priv->stats.csum_err_invalid_ip_version++;
+			goto sw_csum;
+		}
+
+		check = rmnet_map_get_csum_field(proto, trans);
+		if (check) {
+			skb->ip_summed = CHECKSUM_NONE;
+			/* Ask for checksum offloading */
+			ul_header->csum_valid_required = 1;
+			priv->stats.csum_hw++;
+			return;
+		}
+	}
+
+sw_csum:
+	priv->stats.csum_sw++;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct rmnet_port *port,
+				      struct net_device *orig_dev,
+				      int csum_type)
+{
+	switch (csum_type) {
+	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+		break;
+	case RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5:
+		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+		break;
+	default:
+		break;
+	}
+}
+
+bool rmnet_map_v5_csum_buggy(struct rmnet_map_v5_coal_header *coal_hdr)
+{
+	/* Only applies to frames with a single packet */
+	if (coal_hdr->num_nlos != 1 || coal_hdr->nl_pairs[0].num_packets != 1)
+		return false;
+
+	/* TCP header has FIN or PUSH set */
+	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_COAL)
+		return true;
+
+	/* Hit packet limit, byte limit, or time limit/EOF on DMA */
+	if (coal_hdr->close_type == RMNET_MAP_COAL_CLOSE_HW) {
+		switch (coal_hdr->close_value) {
+		case RMNET_MAP_COAL_CLOSE_HW_PKT:
+		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
+		case RMNET_MAP_COAL_CLOSE_HW_TIME:
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void rmnet_map_move_headers(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	u16 ip_len;
+	u16 trans_len = 0;
+	u8 proto;
+
+	/* This only applies to non-linear SKBs */
+	if (!skb_is_nonlinear(skb))
+		return;
+
+	iph = (struct iphdr *)rmnet_map_data_ptr(skb);
+	if (iph->version == 4) {
+		ip_len = iph->ihl * 4;
+		proto = iph->protocol;
+		if (iph->frag_off & htons(IP_OFFSET))
+			/* No transport header information */
+			goto pull;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+		__be16 frag_off;
+		u8 nexthdr = ip6h->nexthdr;
+		int ext_hdr_len;
+
+		/* Keep the signed return value so the error check below can
+		 * actually fire; ip_len is u16 and can never be negative.
+		 */
+		ext_hdr_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
+					       &frag_off);
+		if (ext_hdr_len < 0)
+			return;
+
+		ip_len = ext_hdr_len;
+		proto = nexthdr;
+	} else {
+		return;
+	}
+
+	if (proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
+
+		trans_len = tp->doff * 4;
+	} else if (proto == IPPROTO_UDP) {
+		trans_len = sizeof(struct udphdr);
+	} else if (proto == NEXTHDR_FRAGMENT) {
+		/* Non-first fragments don't have the fragment length added by
+		 * ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
+		 * we account for the length here.
+		 */
+		ip_len += sizeof(struct frag_hdr);
+	}
+
+pull:
+	__pskb_pull_tail(skb, ip_len + trans_len);
+	skb_reset_network_header(skb);
+	if (trans_len)
+		skb_set_transport_header(skb, ip_len);
+}
+
+static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
+				     struct rmnet_map_coal_metadata *coal_meta,
+				     struct sk_buff *dest)
+{
+	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) +
+				    coal_meta->ip_len + coal_meta->trans_len;
+	u32 copy_len = coal_meta->data_len * coal_meta->pkt_count;
+
+	if (skb_is_nonlinear(coal_skb)) {
+		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
+		struct page *page = skb_frag_page(frag0);
+
+		skb_append_pagefrags(dest, page,
+				     frag0->bv_offset + coal_meta->ip_len +
+				     coal_meta->trans_len +
+				     coal_meta->data_offset,
+				     copy_len);
+		dest->data_len += copy_len;
+		dest->len += copy_len;
+	} else {
+		skb_put_data(dest, data_start + coal_meta->data_offset,
+			     copy_len);
+	}
+}
+
+/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
+ * if needed (i.e. forwarding, UDP GRO)
+ */
+static void rmnet_map_gso_stamp(struct sk_buff *skb,
+				struct rmnet_map_coal_metadata *coal_meta)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (coal_meta->trans_proto == IPPROTO_TCP)
+		shinfo->gso_type = (coal_meta->ip_proto == 4) ?
+				   SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	else
+		shinfo->gso_type = SKB_GSO_UDP_L4;
+
+	shinfo->gso_size = coal_meta->data_len;
+	shinfo->gso_segs = coal_meta->pkt_count;
+}
+
+/* Handles setting up the partial checksum in the skb. Sets the transport
+ * checksum to the pseudoheader checksum and sets the csum offload metadata
+ */
+static void rmnet_map_partial_csum(struct sk_buff *skb,
+				   struct rmnet_map_coal_metadata *coal_meta)
+{
+	unsigned char *data = skb->data;
+	__sum16 pseudo;
+	u16 pkt_len = skb->len - coal_meta->ip_len;
+
+	if (coal_meta->ip_proto == 4) {
+		struct iphdr *iph = (struct iphdr *)data;
+
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    pkt_len, coal_meta->trans_proto,
+					    0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
+
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  pkt_len, coal_meta->trans_proto, 0);
+	}
+
+	if (coal_meta->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);
+
+		tp->check = pseudo;
+		skb->csum_offset = offsetof(struct tcphdr, check);
+	} else {
+		struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);
+
+		up->check = pseudo;
+		skb->csum_offset = offsetof(struct udphdr, check);
+	}
+
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
+}
+
+static void
+__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
+			     struct rmnet_map_coal_metadata *coal_meta,
+			     struct sk_buff_head *list, u8 pkt_id,
+			     bool csum_valid)
+{
+	struct sk_buff *skbn;
+	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
+	__sum16 *check = NULL;
+	u32 alloc_len;
+	u32 dlen = coal_meta->data_len * coal_meta->pkt_count;
+	u32 hlen = coal_meta->ip_len + coal_meta->trans_len;
+	bool zero_csum = false;
+
+	/* We can avoid copying the data if the SKB we got from the lower-level
+	 * drivers was nonlinear.
+	 */
+	if (skb_is_nonlinear(coal_skb))
+		alloc_len = hlen;
+	else
+		alloc_len = hlen + dlen;
+
+	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
+	if (!skbn)
+		return;
+
+	skb_reserve(skbn, hlen);
+	rmnet_map_nonlinear_copy(coal_skb, coal_meta, skbn);
+
+	/* Push transport header and update necessary fields */
+	skb_push(skbn, coal_meta->trans_len);
+	memcpy(skbn->data, coal_meta->trans_header, coal_meta->trans_len);
+	skb_reset_transport_header(skbn);
+	if (coal_meta->trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th = tcp_hdr(skbn);
+
+		th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
+		check = &th->check;
+
+		/* Don't allow dangerous flags to be set in any segment but the
+		 * last one.
+		 */
+		if (th->fin || th->psh) {
+			if (hlen + coal_meta->data_offset + dlen <
+			    coal_skb->len) {
+				th->fin = 0;
+				th->psh = 0;
+			}
+		}
+	} else if (coal_meta->trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh = udp_hdr(skbn);
+
+		uh->len = htons(skbn->len);
+		check = &uh->check;
+		if (coal_meta->ip_proto == 4 && !uh->check)
+			zero_csum = true;
+	}
+
+	/* Push IP header and update necessary fields */
+	skb_push(skbn, coal_meta->ip_len);
+	memcpy(skbn->data, coal_meta->ip_header, coal_meta->ip_len);
+	skb_reset_network_header(skbn);
+	if (coal_meta->ip_proto == 4) {
+		struct iphdr *iph = ip_hdr(skbn);
+
+		iph->id = htons(ntohs(iph->id) + coal_meta->pkt_id);
+		iph->tot_len = htons(skbn->len);
+		iph->check = 0;
+		iph->check = ip_fast_csum(iph, iph->ihl);
+	} else {
+		/* Payload length includes any extension headers */
+		ipv6_hdr(skbn)->payload_len = htons(skbn->len -
+						    sizeof(struct ipv6hdr));
+	}
+
+	/* Handle checksum status */
+	if (likely(csum_valid) || zero_csum) {
+		/* Set the partial checksum information */
+		rmnet_map_partial_csum(skbn, coal_meta);
+	} else if (check) {
+		/* Unfortunately, we have to fake a bad checksum here, since
+		 * the original bad value is lost by the hardware. The only
+		 * reliable way to do it is to calculate the actual checksum
+		 * and corrupt it.
+		 */
+		__wsum csum;
+		unsigned int offset = skb_transport_offset(skbn);
+		__sum16 pseudo;
+
+		/* Calculate pseudo header */
+		if (coal_meta->ip_proto == 4) {
+			struct iphdr *iph = ip_hdr(skbn);
+
+			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						    skbn->len -
+						    coal_meta->ip_len,
+						    coal_meta->trans_proto, 0);
+		} else {
+			struct ipv6hdr *ip6h = ipv6_hdr(skbn);
+
+			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+						  skbn->len - coal_meta->ip_len,
+						  coal_meta->trans_proto, 0);
+		}
+
+		*check = pseudo;
+		csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
+		/* Add 1 to corrupt. This cannot produce a final value of 0
+		 * since csum_fold() can't return a value of 0xFFFF.
+		 */
+		*check = csum16_add(csum_fold(csum), htons(1));
+		skbn->ip_summed = CHECKSUM_NONE;
+	}
+
+	skbn->dev = coal_skb->dev;
+	priv->stats.coal.coal_reconstruct++;
+
+	/* Stamp GSO information if necessary */
+	if (coal_meta->pkt_count > 1)
+		rmnet_map_gso_stamp(skbn, coal_meta);
+
+	/* Propagate priority value */
+	skbn->priority = coal_skb->priority;
+
+	__skb_queue_tail(list, skbn);
+
+	/* Update meta information to move past the data we just segmented */
+	coal_meta->data_offset += dlen;
+	coal_meta->pkt_id = pkt_id + 1;
+	coal_meta->pkt_count = 0;
+}
+
+static bool rmnet_map_validate_csum(struct sk_buff *skb,
+				    struct rmnet_map_coal_metadata *meta)
+{
+	u8 *data = rmnet_map_data_ptr(skb);
+	unsigned int datagram_len;
+	__wsum csum;
+	__sum16 pseudo;
+
+	datagram_len = skb->len - meta->ip_len;
+	if (meta->ip_proto == 4) {
+		struct iphdr *iph = (struct iphdr *)data;
+
+		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					    datagram_len,
+					    meta->trans_proto, 0);
+	} else {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)data;
+
+		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  datagram_len, meta->trans_proto,
+					  0);
+	}
+
+	csum = skb_checksum(skb, meta->ip_len, datagram_len,
+			    csum_unfold(pseudo));
+	return !csum_fold(csum);
+}
+
+/* Converts the coalesced SKB into a list of SKBs.
+ * NLOs containing csum errors will not be included.
+ * The original coalesced SKB should be treated as invalid and
+ * must be freed by the caller
+ */
+static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
+				       u64 nlo_err_mask,
+				       struct sk_buff_head *list)
+{
+	struct iphdr *iph;
+	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
+	struct rmnet_map_v5_coal_header *coal_hdr;
+	struct rmnet_map_coal_metadata coal_meta;
+	u16 pkt_len;
+	u8 pkt, total_pkt = 0;
+	u8 nlo;
+	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;
+	bool zero_csum = false;
+
+	memset(&coal_meta, 0, sizeof(coal_meta));
+
+	/* Pull off the headers we no longer need */
+	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
+	coal_hdr = (struct rmnet_map_v5_coal_header *)
+		   rmnet_map_data_ptr(coal_skb);
+	pskb_pull(coal_skb, sizeof(*coal_hdr));
+
+	iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
+
+	if (iph->version == 4) {
+		coal_meta.ip_proto = 4;
+		coal_meta.ip_len = iph->ihl * 4;
+		coal_meta.trans_proto = iph->protocol;
+		coal_meta.ip_header = iph;
+
+		/* Don't allow coalescing of any packets with IP options */
+		if (iph->ihl != 5)
+			gro = false;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+		__be16 frag_off;
+		u8 protocol = ip6h->nexthdr;
+
+		coal_meta.ip_proto = 6;
+		coal_meta.ip_len = ipv6_skip_exthdr(coal_skb, sizeof(*ip6h),
+						    &protocol, &frag_off);
+		coal_meta.trans_proto = protocol;
+		coal_meta.ip_header = ip6h;
+
+		/* If we run into a problem, or this has a fragment header
+		 * (which should technically not be possible, if the HW
+		 * works as intended...), bail.
+		 */
+		if (coal_meta.ip_len < 0 || frag_off) {
+			priv->stats.coal.coal_ip_invalid++;
+			return;
+		} else if (coal_meta.ip_len > sizeof(*ip6h)) {
+			/* Don't allow coalescing of any packets with IPv6
+			 * extension headers.
+			 */
+			gro = false;
+		}
+	} else {
+		priv->stats.coal.coal_ip_invalid++;
+		return;
+	}
+
+	if (coal_meta.trans_proto == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + coal_meta.ip_len);
+		coal_meta.trans_len = th->doff * 4;
+		coal_meta.trans_header = th;
+	} else if (coal_meta.trans_proto == IPPROTO_UDP) {
+		struct udphdr *uh;
+
+		uh = (struct udphdr *)((u8 *)iph + coal_meta.ip_len);
+		coal_meta.trans_len = sizeof(*uh);
+		coal_meta.trans_header = uh;
+		/* Check for v4 zero checksum */
+		if (coal_meta.ip_proto == 4 && !uh->check)
+			zero_csum = true;
+	} else {
+		priv->stats.coal.coal_trans_invalid++;
+		return;
+	}
+
+	if (rmnet_map_v5_csum_buggy(coal_hdr) && !zero_csum) {
+		rmnet_map_move_headers(coal_skb);
+		/* Mark as valid if it checks out */
+		if (rmnet_map_validate_csum(coal_skb, &coal_meta))
+			coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		__skb_queue_tail(list, coal_skb);
+		return;
+	}
+
+	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
+	 * no checksum errors, and are allowing GRO. We can just reuse this
+	 * SKB unchanged.
+	 */
+	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
+		rmnet_map_move_headers(coal_skb);
+		coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
+		coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
+		coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
+		coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
+		if (coal_meta.pkt_count > 1) {
+			rmnet_map_partial_csum(coal_skb, &coal_meta);
+			rmnet_map_gso_stamp(coal_skb, &coal_meta);
+		}
+
+		__skb_queue_tail(list, coal_skb);
+		return;
+	}
+
+	/* Segment the coalesced SKB into new packets */
+	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
+		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
+		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
+		coal_meta.data_len = pkt_len;
+		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
+		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
+			bool csum_err = nlo_err_mask & 1;
+
+			/* Segment the packet if we're not sending the larger
+			 * packet up the stack.
+			 */
+			if (!gro) {
+				coal_meta.pkt_count = 1;
+				if (csum_err)
+					priv->stats.coal.coal_csum_err++;
+
+				__rmnet_map_segment_coal_skb(coal_skb,
+							     &coal_meta, list,
+							     total_pkt,
+							     !csum_err);
+				continue;
+			}
+
+			if (csum_err) {
+				priv->stats.coal.coal_csum_err++;
+
+				/* Segment out the good data */
+				if (gro && coal_meta.pkt_count)
+					__rmnet_map_segment_coal_skb(coal_skb,
+								     &coal_meta,
+								     list,
+								     total_pkt,
+								     true);
+
+				/* Segment out the bad checksum */
+				coal_meta.pkt_count = 1;
+				__rmnet_map_segment_coal_skb(coal_skb,
+							     &coal_meta, list,
+							     total_pkt, false);
+			} else {
+				coal_meta.pkt_count++;
+			}
+		}
+
+		/* If we're switching NLOs, we need to send out everything from
+		 * the previous one, if we haven't done so. NLOs only switch
+		 * when the packet length changes.
+		 */
+		if (coal_meta.pkt_count)
+			__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
+						     total_pkt, true);
+	}
+}
+
+/* Record reason for coalescing pipe closure */
+static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type,
+					   u8 code)
+{
+	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;
+
+	switch (type) {
+	case RMNET_MAP_COAL_CLOSE_NON_COAL:
+		stats->non_coal++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_IP_MISS:
+		stats->ip_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_TRANS_MISS:
+		stats->trans_miss++;
+		break;
+	case RMNET_MAP_COAL_CLOSE_HW:
+		switch (code) {
+		case RMNET_MAP_COAL_CLOSE_HW_NL:
+			stats->hw_nl++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_PKT:
+			stats->hw_pkt++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_BYTE:
+			stats->hw_byte++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_TIME:
+			stats->hw_time++;
+			break;
+		case RMNET_MAP_COAL_CLOSE_HW_EVICT:
+			stats->hw_evict++;
+			break;
+		default:
+			break;
+		}
+		break;
+	case RMNET_MAP_COAL_CLOSE_COAL:
+		stats->coal++;
+		break;
+	default:
+		break;
+	}
+}
+
+/* Check if the coalesced header has any incorrect values, in which case the
+ * entire coalesced skb must be dropped. Then check whether there are any
+ * checksum issues.
+ */
+static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
+					    u64 *nlo_err_mask)
+{
+	struct rmnet_map_v5_coal_header *coal_hdr;
+	unsigned char *data = rmnet_map_data_ptr(skb);
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	u64 mask = 0;
+	int i;
+	u8 veid, pkts = 0;
+
+	coal_hdr = ((struct rmnet_map_v5_coal_header *)
+		    (data + sizeof(struct rmnet_map_header)));
+	veid = coal_hdr->virtual_channel_id;
+
+	if (coal_hdr->num_nlos == 0 ||
+	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
+		priv->stats.coal.coal_hdr_nlo_err++;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
+		/* If there is a checksum issue, we need to split
+		 * up the skb. Rebuild the full csum error field
+		 */
+		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
+		u8 pkt = coal_hdr->nl_pairs[i].num_packets;
+
+		mask |= ((u64)err) << (8 * i);
+
+		/* Track total packets in frame */
+		pkts += pkt;
+		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
+			priv->stats.coal.coal_hdr_pkt_err++;
+			return -EINVAL;
+		}
+	}
+
+	/* Track number of packets we get inside of coalesced frames */
+	priv->stats.coal.coal_pkts += pkts;
+
+	/* Update ethtool stats */
+	rmnet_map_data_log_close_stats(priv,
+				       coal_hdr->close_type,
+				       coal_hdr->close_value);
+	if (veid < RMNET_MAX_VEID)
+		priv->stats.coal.coal_veid[veid]++;
+
+	*nlo_err_mask = mask;
+
+	return 0;
+}
+
+/* Process a QMAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+				      struct sk_buff_head *list,
+				      u16 len)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	u64 nlo_err_mask;
+	int rc = 0;
+
+	switch (rmnet_map_get_next_hdr_type(skb)) {
+	case RMNET_MAP_HEADER_TYPE_COALESCING:
+		priv->stats.coal.coal_rx++;
+		rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask);
+		if (rc)
+			return rc;
+
+		rmnet_map_segment_coal_skb(skb, nlo_err_mask, list);
+		if (skb_peek(list) != skb)
+			consume_skb(skb);
+		break;
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+			priv->stats.csum_sw++;
+		} else if (rmnet_map_get_csum_valid(skb)) {
+			priv->stats.csum_ok++;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			priv->stats.csum_valid_unset++;
+		}
+
+		/* Pull unnecessary headers and move the rest to the linear
+		 * section of the skb.
+		 */
+		pskb_pull(skb,
+			  (sizeof(struct rmnet_map_header) +
+			   sizeof(struct rmnet_map_v5_csum_header)));
+		rmnet_map_move_headers(skb);
+
+		/* Remove padding only for csum offload packets.
+		 * Coalesced packets should never have padding.
+		 */
+		pskb_trim(skb, len);
+		__skb_queue_tail(list, skb);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
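+/* UL aggregation timing thresholds, in nanoseconds: an aggregation buffer is
+ * flushed once it has been held longer than rmnet_agg_time_limit, and a
+ * packet arriving more than rmnet_agg_bypass_time after the previous one
+ * bypasses aggregation and is sent out immediately.
+ */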
+long rmnet_agg_time_limit __read_mostly = 1000000L;
+long rmnet_agg_bypass_time __read_mostly = 10000000L;
+
+int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
+{
+	u8 *packet_start = skb->data + offset;
+	int is_icmp = 0;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *ip4h = (struct iphdr *)(packet_start);
+
+		if (ip4h->protocol == IPPROTO_ICMP)
+			is_icmp = 1;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
+
+		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
+			is_icmp = 1;
+		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+			struct frag_hdr *frag;
+
+			frag = (struct frag_hdr *)(packet_start
+						   + sizeof(struct ipv6hdr));
+			if (frag->nexthdr == IPPROTO_ICMPV6)
+				is_icmp = 1;
+		}
+	}
+
+	return is_icmp;
+}
+
+static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct rmnet_aggregation_state *state;
+
+	state = container_of(work, struct rmnet_aggregation_state, agg_wq);
+
+	spin_lock_bh(&state->agg_lock);
+	if (likely(state->agg_state == -EINPROGRESS)) {
+		/* Buffer may have already been shipped out */
+		if (likely(state->agg_skb)) {
+			skb = state->agg_skb;
+			state->agg_skb = NULL;
+			state->agg_count = 0;
+			memset(&state->agg_time, 0, sizeof(state->agg_time));
+		}
+		state->agg_state = 0;
+	}
+
+	if (skb)
+		state->send_agg_skb(skb);
+	spin_unlock_bh(&state->agg_lock);
+}
+
+enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
+{
+	struct rmnet_aggregation_state *state;
+
+	state = container_of(t, struct rmnet_aggregation_state, hrtimer);
+
+	schedule_work(&state->agg_wq);
+	return HRTIMER_NORESTART;
+}
+
+static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
+{
+	unsigned int linear = src->len - src->data_len, target = src->len;
+	unsigned char *src_buf;
+	struct sk_buff *skb;
+
+	src_buf = src->data;
+	skb_put_data(dst, src_buf, linear);
+	target -= linear;
+
+	skb = src;
+
+	while (target) {
+		unsigned int i = 0, non_linear = 0;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+			src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+
+			skb_put_data(dst, src_buf, non_linear);
+			target -= non_linear;
+		}
+
+		if (skb_shinfo(skb)->frag_list) {
+			skb = skb_shinfo(skb)->frag_list;
+			continue;
+		}
+
+		if (skb->next)
+			skb = skb->next;
+	}
+}
+
+static void rmnet_free_agg_pages(struct rmnet_aggregation_state *state)
+{
+	struct rmnet_agg_page *agg_page, *idx;
+
+	list_for_each_entry_safe(agg_page, idx, &state->agg_list, list) {
+		list_del(&agg_page->list);
+		rmnet_mem_put_page_entry(agg_page->page);
+		kfree(agg_page);
+	}
+
+	state->agg_head = NULL;
+}
+
+static struct page *rmnet_get_agg_pages(struct rmnet_aggregation_state *state)
+{
+	struct rmnet_agg_page *agg_page;
+	struct page *page = NULL;
+	int i = 0;
+	int rc;
+	int pageorder = 2;
+
+	if (!(state->params.agg_features & RMNET_PAGE_RECYCLE))
+		goto alloc;
+
+	do {
+		agg_page = state->agg_head;
+		if (unlikely(!agg_page))
+			break;
+
+		if (page_ref_count(agg_page->page) == 1) {
+			page = agg_page->page;
+			page_ref_inc(agg_page->page);
+
+			state->stats->ul_agg_reuse++;
+			state->agg_head = list_next_entry(agg_page, list);
+			break;
+		}
+
+		state->agg_head = list_next_entry(agg_page, list);
+		i++;
+	} while (i <= 5);
+
+alloc:
+	if (!page) {
+		page = rmnet_mem_get_pages_entry(GFP_ATOMIC, state->agg_size_order, &rc,
+						 &pageorder, RMNET_CORE_ID);
+
+		state->stats->ul_agg_alloc++;
+	}
+
+	return page;
+}
+
+static struct rmnet_agg_page *
+__rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
+{
+	struct rmnet_agg_page *agg_page;
+	struct page *page;
+	int rc;
+	int pageorder = 2;
+
+	agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
+	if (!agg_page)
+		return NULL;
+
+	page = rmnet_mem_get_pages_entry(GFP_ATOMIC, state->agg_size_order, &rc,
+					 &pageorder, RMNET_CORE_ID);
+
+	if (!page) {
+		kfree(agg_page);
+		return NULL;
+	}
+
+	agg_page->page = page;
+	INIT_LIST_HEAD(&agg_page->list);
+
+	return agg_page;
+}
+
+static void rmnet_alloc_agg_pages(struct rmnet_aggregation_state *state)
+{
+	struct rmnet_agg_page *agg_page = NULL;
+	int i = 0;
+
+	for (i = 0; i < RMNET_PAGE_COUNT; i++) {
+		agg_page = __rmnet_alloc_agg_pages(state);
+
+		if (agg_page)
+			list_add_tail(&agg_page->list, &state->agg_list);
+	}
+
+	state->agg_head = list_first_entry_or_null(&state->agg_list,
+						   struct rmnet_agg_page, list);
+}
+
+static struct sk_buff *
+rmnet_map_build_skb(struct rmnet_aggregation_state *state)
+{
+	struct sk_buff *skb;
+	unsigned int size;
+	struct page *page;
+	void *vaddr;
+
+	page = rmnet_get_agg_pages(state);
+	if (!page)
+		return NULL;
+
+	vaddr = page_address(page);
+	size = PAGE_SIZE << state->agg_size_order;
+
+	skb = build_skb(vaddr, size);
+	if (!skb) {
+		put_page(page);
+		return NULL;
+	}
+
+	return skb;
+}
+
+void rmnet_map_send_agg_skb(struct rmnet_aggregation_state *state)
+{
+	struct sk_buff *agg_skb;
+
+	if (!state->agg_skb) {
+		spin_unlock_bh(&state->agg_lock);
+		return;
+	}
+
+	agg_skb = state->agg_skb;
+	/* Reset the aggregation state */
+	state->agg_skb = NULL;
+	state->agg_count = 0;
+	memset(&state->agg_time, 0, sizeof(state->agg_time));
+	state->agg_state = 0;
+	state->send_agg_skb(agg_skb);
+	spin_unlock_bh(&state->agg_lock);
+	hrtimer_cancel(&state->hrtimer);
+}
+
+void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+			    bool low_latency)
+{
+	struct rmnet_aggregation_state *state;
+	struct timespec64 diff, last;
+	int size;
+
+	state = &port->agg_state[(low_latency) ? RMNET_LL_AGG_STATE :
+						 RMNET_DEFAULT_AGG_STATE];
+
+new_packet:
+	spin_lock_bh(&state->agg_lock);
+	memcpy(&last, &state->agg_last, sizeof(last));
+	ktime_get_real_ts64(&state->agg_last);
+
+	if ((port->data_format & RMNET_EGRESS_FORMAT_PRIORITY) &&
+	    (RMNET_LLM(skb->priority) || RMNET_APS_LLB(skb->priority))) {
+		/* Send out any aggregated SKBs we have */
+		rmnet_map_send_agg_skb(state);
+		/* Send out the priority SKB. Not holding agg_lock anymore */
+		skb->protocol = htons(ETH_P_MAP);
+		state->send_agg_skb(skb);
+		return;
+	}
+
+	if (!state->agg_skb) {
+		/* Check to see if we should aggregate first. If the traffic is
+		 * very sparse, don't aggregate. We will need to tune this later.
+		 */
+		diff = timespec64_sub(state->agg_last, last);
+		size = state->params.agg_size - skb->len;
+
+		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
+		    size <= 0) {
+			skb->protocol = htons(ETH_P_MAP);
+			state->send_agg_skb(skb);
+			spin_unlock_bh(&state->agg_lock);
+			return;
+		}
+
+		state->agg_skb = rmnet_map_build_skb(state);
+		if (!state->agg_skb) {
+			state->agg_skb = NULL;
+			state->agg_count = 0;
+			memset(&state->agg_time, 0, sizeof(state->agg_time));
+			skb->protocol = htons(ETH_P_MAP);
+			state->send_agg_skb(skb);
+			spin_unlock_bh(&state->agg_lock);
+			return;
+		}
+
+		rmnet_map_linearize_copy(state->agg_skb, skb);
+		state->agg_skb->dev = skb->dev;
+		state->agg_skb->protocol = htons(ETH_P_MAP);
+		state->agg_count = 1;
+		ktime_get_real_ts64(&state->agg_time);
+		dev_consume_skb_any(skb);
+		goto schedule;
+	}
+	diff = timespec64_sub(state->agg_last, state->agg_time);
+	size = skb_tailroom(state->agg_skb);
+
+	if (skb->len > size ||
+	    state->agg_count >= state->params.agg_count ||
+	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
+		rmnet_map_send_agg_skb(state);
+		goto new_packet;
+	}
+
+	rmnet_map_linearize_copy(state->agg_skb, skb);
+	state->agg_count++;
+	dev_consume_skb_any(skb);
+
+schedule:
+	if (state->agg_state != -EINPROGRESS) {
+		state->agg_state = -EINPROGRESS;
+		hrtimer_start(&state->hrtimer,
+			      ns_to_ktime(state->params.agg_time),
+			      HRTIMER_MODE_REL);
+	}
+	spin_unlock_bh(&state->agg_lock);
+}
+
+void rmnet_map_update_ul_agg_config(struct rmnet_aggregation_state *state,
+				    u16 size, u8 count, u8 features, u32 time)
+{
+	spin_lock_bh(&state->agg_lock);
+	state->params.agg_count = count;
+	state->params.agg_time = time;
+	state->params.agg_size = size;
+	state->params.agg_features = features;
+
+	rmnet_free_agg_pages(state);
+
+	/* This effectively disables recycling if the UL aggregation size is
+	 * less than PAGE_SIZE.
+	 */
+	if (size < PAGE_SIZE)
+		goto done;
+
+	state->agg_size_order = get_order(size);
+
+	size = PAGE_SIZE << state->agg_size_order;
+	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	state->params.agg_size = size;
+
+	if (state->params.agg_features == RMNET_PAGE_RECYCLE)
+		rmnet_alloc_agg_pages(state);
+
+done:
+	spin_unlock_bh(&state->agg_lock);
+}
+
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
+{
+	unsigned int i;
+
+	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
+		struct rmnet_aggregation_state *state = &port->agg_state[i];
+
+		spin_lock_init(&state->agg_lock);
+		INIT_LIST_HEAD(&state->agg_list);
+		hrtimer_init(&state->hrtimer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		state->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+		INIT_WORK(&state->agg_wq, rmnet_map_flush_tx_packet_work);
+		state->stats = &port->stats.agg;
+
+		/* Since PAGE_SIZE - 1 is specified here, no pages are
+		 * pre-allocated. This is done to reduce memory usage in cases
+		 * where UL aggregation is disabled.
+		 * Additionally, the features flag is set to 0.
+		 */
+		rmnet_map_update_ul_agg_config(state, PAGE_SIZE - 1, 20, 0,
+					       3000000);
+	}
+
+	/* Set delivery functions for each aggregation state */
+	port->agg_state[RMNET_DEFAULT_AGG_STATE].send_agg_skb = dev_queue_xmit;
+	port->agg_state[RMNET_LL_AGG_STATE].send_agg_skb = rmnet_ll_send_skb;
+}
+
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
+{
+	unsigned int i;
+
+	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
+		struct rmnet_aggregation_state *state = &port->agg_state[i];
+
+		hrtimer_cancel(&state->hrtimer);
+		cancel_work_sync(&state->agg_wq);
+	}
+
+	for (i = RMNET_DEFAULT_AGG_STATE; i < RMNET_MAX_AGG_STATE; i++) {
+		struct rmnet_aggregation_state *state = &port->agg_state[i];
+
+		spin_lock_bh(&state->agg_lock);
+		if (state->agg_state == -EINPROGRESS) {
+			if (state->agg_skb) {
+				kfree_skb(state->agg_skb);
+				state->agg_skb = NULL;
+				state->agg_count = 0;
+				memset(&state->agg_time, 0,
+				       sizeof(state->agg_time));
+			}
+
+			state->agg_state = 0;
+		}
+
+		rmnet_free_agg_pages(state);
+		spin_unlock_bh(&state->agg_lock);
+	}
+}
+
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush)
+{
+	struct rmnet_aggregation_state *state;
+	struct rmnet_port *port;
+	struct sk_buff *agg_skb;
+
+	if (unlikely(ch >= RMNET_MAX_AGG_STATE))
+		ch = RMNET_DEFAULT_AGG_STATE;
+
+	port = rmnet_get_port(qmap_skb->dev);
+	if (!port) {
+		kfree_skb(qmap_skb);
+		return;
+	}
+	state = &port->agg_state[ch];
+
+	if (!flush)
+		goto send;
+
+	if (!(port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION))
+		goto send;
+
+	spin_lock_bh(&state->agg_lock);
+	if (state->agg_skb) {
+		agg_skb = state->agg_skb;
+		state->agg_skb = NULL;
+		state->agg_count = 0;
+		memset(&state->agg_time, 0, sizeof(state->agg_time));
+		state->agg_state = 0;
+		state->send_agg_skb(agg_skb);
+		spin_unlock_bh(&state->agg_lock);
+		hrtimer_cancel(&state->hrtimer);
+	} else {
+		spin_unlock_bh(&state->agg_lock);
+	}
+
+send:
+	state->send_agg_skb(qmap_skb);
+}
+EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
+
+int rmnet_map_add_tso_header(struct sk_buff *skb, struct rmnet_port *port,
+			      struct net_device *orig_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+	struct rmnet_map_v5_tso_header *ul_header;
+
+	if (!(orig_dev->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
+		priv->stats.tso_arriv_errs++;
+		return -EINVAL;
+	}
+
+	ul_header = (struct rmnet_map_v5_tso_header *)
+		    skb_push(skb, sizeof(*ul_header));
+	memset(ul_header, 0, sizeof(*ul_header));
+	ul_header->header_type = RMNET_MAP_HEADER_TYPE_TSO;
+
+	if (port->data_format & RMNET_EGRESS_FORMAT_PRIORITY)
+		rmnet_map_v5_check_priority(skb, orig_dev,
+					    (struct rmnet_map_v5_csum_header *)ul_header,
+					    true);
+
+	ul_header->segment_size = htons(skb_shinfo(skb)->gso_size);
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+		ul_header->ip_id_cfg = 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+	skb_shinfo(skb)->gso_size = 0;
+	skb_shinfo(skb)->gso_segs = 0;
+	skb_shinfo(skb)->gso_type = 0;
+
+	priv->stats.tso_pkts++;
+
+	return 0;
+}

+ 100 - 0
qcom/opensource/datarmnet/core/rmnet_module.c

@@ -0,0 +1,100 @@
+/* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "rmnet_module.h"
+
+struct rmnet_module_hook_info {
+	void *func __rcu;
+};
+
+static struct rmnet_module_hook_info
+rmnet_module_hooks[__RMNET_MODULE_NUM_HOOKS];
+
+void
+rmnet_module_hook_register(const struct rmnet_module_hook_register_info *info,
+			   int hook_count)
+{
+	struct rmnet_module_hook_info *hook_info;
+	int i;
+
+	for (i = 0; i < hook_count; i++) {
+		int hook = info[i].hooknum;
+
+		if (hook < __RMNET_MODULE_NUM_HOOKS) {
+			hook_info = &rmnet_module_hooks[hook];
+			rcu_assign_pointer(hook_info->func, info[i].func);
+		}
+	}
+}
+EXPORT_SYMBOL(rmnet_module_hook_register);
+
+bool rmnet_module_hook_is_set(int hook)
+{
+	if (hook >= __RMNET_MODULE_NUM_HOOKS)
+		return false;
+
+	return rcu_dereference(rmnet_module_hooks[hook].func) != NULL;
+}
+EXPORT_SYMBOL(rmnet_module_hook_is_set);
+
+void
+rmnet_module_hook_unregister_no_sync(const struct rmnet_module_hook_register_info *info,
+				     int hook_count)
+{
+	struct rmnet_module_hook_info *hook_info;
+	int i;
+
+	for (i = 0; i < hook_count; i++) {
+		int hook = info[i].hooknum;
+
+		if (hook < __RMNET_MODULE_NUM_HOOKS) {
+			hook_info = &rmnet_module_hooks[hook];
+			rcu_assign_pointer(hook_info->func, NULL);
+		}
+	}
+}
+EXPORT_SYMBOL(rmnet_module_hook_unregister_no_sync);
+
+#define __RMNET_HOOK_DEFINE(call, hook_num, proto, args, ret_type) \
+int rmnet_module_hook_##call( \
+__RMNET_HOOK_PROTO(RMNET_HOOK_PARAMS(proto), ret_type) \
+) \
+{ \
+	ret_type (*__func)(proto); \
+	struct rmnet_module_hook_info *__info = \
+		&rmnet_module_hooks[hook_num]; \
+	int __ret = 0; \
+\
+	rcu_read_lock(); \
+	__func = rcu_dereference(__info->func); \
+	if (__func) { \
+		RMNET_HOOK_IF_NON_VOID_TYPE(ret_type)( ret_type __rc = ) \
+		__func(args); \
+		__ret = 1; \
+\
+		RMNET_HOOK_IF_NON_VOID_TYPE(ret_type)( if (__ret_code) \
+			*__ret_code = __rc; )\
+	} \
+\
+	rcu_read_unlock(); \
+	return __ret; \
+} \
+EXPORT_SYMBOL(rmnet_module_hook_##call);
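+/* Illustrative call site (hypothetical hook "foo" and signature, not defined
+ * in this file): the generated wrapper returns 1 when a registered hook was
+ * invoked and, for non-void hooks, writes the hook's return value through
+ * __ret_code if it is non-NULL.
+ *
+ *	int hook_rc = 0;
+ *
+ *	if (rmnet_module_hook_foo(&hook_rc, skb))
+ *		pr_debug("hook handled skb, rc %d\n", hook_rc);
+ */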
+
+#undef RMNET_MODULE_HOOK
+#define RMNET_MODULE_HOOK(call, hook_num, proto, args, ret_type) \
+__RMNET_HOOK_DEFINE(call, hook_num, RMNET_HOOK_PARAMS(proto), \
+		    RMNET_HOOK_PARAMS(args), ret_type)
+
+#define __RMNET_HOOK_MULTIREAD__
+#include "rmnet_hook.h"
+

+ 137 - 0
qcom/opensource/datarmnet/core/rmnet_module.h

@@ -0,0 +1,137 @@
+/* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RMNET_MODULE_H__
+#define __RMNET_MODULE_H__
+
+#include <linux/rcupdate.h>
+
+enum {
+	RMNET_MODULE_HOOK_OFFLOAD_INGRESS,
+	RMNET_MODULE_HOOK_OFFLOAD_CHAIN_END,
+	RMNET_MODULE_HOOK_SHS_SKB_ENTRY,
+	RMNET_MODULE_HOOK_SHS_SKB_LL_ENTRY,
+	RMNET_MODULE_HOOK_SHS_SWITCH,
+	RMNET_MODULE_HOOK_PERF_TETHER_INGRESS,
+	RMNET_MODULE_HOOK_PERF_TETHER_EGRESS,
+	RMNET_MODULE_HOOK_PERF_TETHER_CMD,
+	RMNET_MODULE_HOOK_PERF_INGRESS,
+	RMNET_MODULE_HOOK_PERF_EGRESS,
+	RMNET_MODULE_HOOK_PERF_SET_THRESH,
+	RMNET_MODULE_HOOK_APS_PRE_QUEUE,
+	RMNET_MODULE_HOOK_APS_POST_QUEUE,
+	RMNET_MODULE_HOOK_WLAN_FLOW_MATCH,
+	RMNET_MODULE_HOOK_APS_DATA_INACTIVE,
+	RMNET_MODULE_HOOK_APS_DATA_ACTIVE,
+	RMNET_MODULE_HOOK_APS_DATA_REPORT,
+	RMNET_MODULE_HOOK_PERF_INGRESS_RX_HANDLER,
+	RMNET_MODULE_HOOK_WLAN_INGRESS_RX_HANDLER,
+	__RMNET_MODULE_NUM_HOOKS,
+};
+
+struct rmnet_module_hook_register_info {
+	int hooknum;
+	void *func;
+};
+
+void
+rmnet_module_hook_register(const struct rmnet_module_hook_register_info *info,
+			   int hook_count);
+bool rmnet_module_hook_is_set(int hook);
+void
+rmnet_module_hook_unregister_no_sync(const struct rmnet_module_hook_register_info *info,
+				     int hook_count);
+static inline void
+rmnet_module_hook_unregister(const struct rmnet_module_hook_register_info *info,
+			     int hook_count)
+{
+	rmnet_module_hook_unregister_no_sync(info, hook_count);
+	synchronize_rcu();
+}
+
+/* Dummy macro. Can use kernel version later */
+#define __CAT(a, b) a ## b
+#define CAT(a, b) __CAT(a, b)
+
+#define RMNET_HOOK_PARAMS(args...) args
+
+#define RMNET_MODULE_HOOK_NUM(__hook) CAT(RMNET_MODULE_HOOK_, __hook)
+#define RMNET_MODULE_HOOK_PROTOCOL(proto...) proto
+#define RMNET_MODULE_HOOK_ARGS(args...) args
+#define RMNET_MODULE_HOOK_RETURN_TYPE(type) type
+
+/* A ...lovely... framework for checking if the argument passed to a function
+ * macro is a pair of parentheses.
+ * If so, resolve to 1. Otherwise, 0.
+ *
+ * The idea here is that you pass the argument along with a "test" macro to
+ * a "checker" macro. If the argument IS a pair of parentheses, this will cause
+ * the tester macro to expand into multiple arguments.
+ *
+ * The key is that the "checker" macro just returns the second argument it
+ * receives. So the "tester" macro is written to expand into a set of arguments
+ * that makes 1 the second argument; if it doesn't expand, the fallback 0 is
+ * the second argument instead.
+ */
+#define __RMNET_HOOK_SECOND_ARG(_, arg, ...) arg
+#define RMNET_HOOK_PARENTHESES_CHECKER(args...) \
+	__RMNET_HOOK_SECOND_ARG(args, 0, )
+#define __RMNET_HOOK_PARENTHESES_TEST(arg) arg, 1,
+#define __RMNET_HOOK_IS_PARENTHESES_TEST(...) __RMNET_HOOK_PARENTHESES_TEST(XXX)
+#define RMNET_HOOK_IS_PARENTHESES(arg) \
+	RMNET_HOOK_PARENTHESES_CHECKER(__RMNET_HOOK_IS_PARENTHESES_TEST arg)
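+/* For illustration only (not used by the build), the check resolves as:
+ *   RMNET_HOOK_IS_PARENTHESES(())   -> tester expands -> second arg is 1
+ *   RMNET_HOOK_IS_PARENTHESES(abcd) -> tester stays put -> second arg is 0
+ */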
+
+/* So what's the point of the above stuff, you ask?
+ *
+ * CPP can't actually do type checking ;). But what we CAN do is something
+ * like this to determine if the type passed in is void. If so, this will
+ * expand to (), causing the RMNET_HOOK_IS_PARENTHESES check to resolve to 1,
+ * but if not, then the check resolves to 0.
+ */
+#define __RMNET_HOOK_CHECK_TYPE_IS_void(arg) arg
+#define RMNET_HOOK_TYPE_IS_VOID(type) \
+	RMNET_HOOK_IS_PARENTHESES( __RMNET_HOOK_CHECK_TYPE_IS_ ## type (()) )
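+/* Illustrative results (not referenced by the build):
+ *   RMNET_HOOK_TYPE_IS_VOID(void) resolves to 1
+ *   RMNET_HOOK_TYPE_IS_VOID(int)  resolves to 0
+ */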
+
+/* And now, we have some logic macros. The main macro will resolve
+ * to one of the branches depending on the bool value passed in.
+ */
+#define __IF_0(t_path, e_path...) e_path
+#define __IF_1(t_path, e_path...) t_path
+#define IF(arg) CAT(__IF_, arg)
+#define __NOT_1 0
+#define __NOT_0 1
+#define NOT(arg) CAT(__NOT_, arg)
+
+/* And now we combine all of this into a purely function-macro way of splitting
+ * the return type handling...
+ *
+ * ...all to work around the fact that you can't actually use #if conditionals
+ * inside macro expansions. It would have been much simpler that way. ;)
+ */
+#define RMNET_HOOK_IF_NON_VOID_TYPE(type) \
+	IF(NOT(RMNET_HOOK_TYPE_IS_VOID(type)))
+
+#define __RMNET_HOOK_PROTO(proto, ret_type)\
+RMNET_HOOK_IF_NON_VOID_TYPE(ret_type) \
+	(RMNET_HOOK_PARAMS(ret_type *__ret_code, proto), \
+	 RMNET_HOOK_PARAMS(proto))
+
+#define __RMNET_HOOK_DECLARE(call, proto, ret_type) \
+int rmnet_module_hook_##call( \
+__RMNET_HOOK_PROTO(RMNET_HOOK_PARAMS(proto), ret_type));
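+/* Sketch of what the declaration expands to, assuming a hypothetical hook
+ * named "foo" taking a struct sk_buff *:
+ *   ret_type int:  int rmnet_module_hook_foo(int *__ret_code, struct sk_buff *skb);
+ *   ret_type void: int rmnet_module_hook_foo(struct sk_buff *skb);
+ */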
+
+#undef RMNET_MODULE_HOOK
+#define RMNET_MODULE_HOOK(call, hook_num, proto, args, ret_type) \
+__RMNET_HOOK_DECLARE(call, RMNET_HOOK_PARAMS(proto), ret_type)
+
+#include "rmnet_hook.h"
+
+#endif

+ 70 - 0
qcom/opensource/datarmnet/core/rmnet_private.h

@@ -0,0 +1,70 @@
+/* Copyright (c) 2013-2014, 2016-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_PRIVATE_H_
+#define _RMNET_PRIVATE_H_
+
+#include <linux/types.h>
+
+#define RMNET_MAX_PACKET_SIZE      16384
+#define RMNET_DFLT_PACKET_SIZE     1500
+#define RMNET_NEEDED_HEADROOM      16
+#define RMNET_TX_QUEUE_LEN         1000
+
+/* Constants */
+#define RMNET_EGRESS_FORMAT_AGGREGATION         BIT(31)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V1       BIT(30)
+#define RMNET_INGRESS_FORMAT_DL_MARKER_V2       BIT(29)
+
+#define RMNET_FLAGS_INGRESS_COALESCE            BIT(4)
+#define RMNET_PRIV_FLAGS_INGRESS_MAP_CKSUMV5    BIT(5)
+#define RMNET_PRIV_FLAGS_EGRESS_MAP_CKSUMV5     BIT(6)
+
+#define RMNET_INGRESS_FORMAT_DL_MARKER  (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
+RMNET_INGRESS_FORMAT_DL_MARKER_V2)
+
+/* UL Packet prioritization */
+#define RMNET_EGRESS_FORMAT_PRIORITY            BIT(28)
+
+/* Power save feature */
+#define RMNET_INGRESS_FORMAT_PS                 BIT(27)
+#define RMNET_FORMAT_PS_NOTIF                   BIT(26)
+
+/* UL Aggregation parameters */
+#define RMNET_PAGE_RECYCLE                      BIT(0)
+
+/* Replace skb->dev with a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND (1)
+/* Pass the frame directly to another device with dev_queue_xmit() */
+#define RMNET_EPMODE_BRIDGE (2)
+
+/* Struct for skb control block use within rmnet driver */
+struct rmnet_skb_cb {
+	/* MUST be the first entries because of legacy reasons */
+	char flush_shs;
+	char qmap_steer;
+
+	bool tethered;
+
+	/* coalescing stats */
+	u32 coal_bytes;
+	u32 coal_bufsize;
+
+	u32 bif;
+	u32 ack_thresh;
+	u32 ack_forced;
+};
+
+#define RMNET_SKB_CB(skb) ((struct rmnet_skb_cb *)(skb)->cb)
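+/* Typical (illustrative) access pattern:
+ *	RMNET_SKB_CB(skb)->tethered = true;
+ */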
+
+#endif /* _RMNET_PRIVATE_H_ */

+ 162 - 0
qcom/opensource/datarmnet/core/rmnet_qmap.c

@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dfc.h"
+#include "rmnet_qmi.h"
+#include "rmnet_ctl.h"
+#include "rmnet_qmap.h"
+#include "rmnet_module.h"
+#include "rmnet_hook.h"
+
+static atomic_t qmap_txid;
+static void *rmnet_ctl_handle;
+static void *rmnet_port;
+static struct net_device *real_data_dev;
+static struct rmnet_ctl_client_if *rmnet_ctl;
+
+int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush)
+{
+	trace_dfc_qmap(skb->data, skb->len, false, ch);
+
+	if (ch != RMNET_CH_CTL && real_data_dev) {
+		skb->protocol = htons(ETH_P_MAP);
+		skb->dev = real_data_dev;
+		rmnet_ctl->log(RMNET_CTL_LOG_DEBUG, "TXI", 0, skb->data,
+			       skb->len);
+
+		rmnet_map_tx_qmap_cmd(skb, ch, flush);
+		return 0;
+	}
+
+	if (rmnet_ctl->send(rmnet_ctl_handle, skb)) {
+		pr_err("Failed to send to rmnet ctl\n");
+		return -ECOMM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_qmap_send);
+
+static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
+{
+	struct qmap_cmd_hdr *cmd;
+	int rc = QMAP_CMD_DONE;
+
+	if (!skb)
+		return;
+
+	trace_dfc_qmap(skb->data, skb->len, true, RMNET_CH_CTL);
+
+	if (skb->len < sizeof(struct qmap_cmd_hdr))
+		goto free_skb;
+
+	cmd = (struct qmap_cmd_hdr *)skb->data;
+	if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
+		goto free_skb;
+
+	rcu_read_lock();
+
+	switch (cmd->cmd_name) {
+	case QMAP_DFC_CONFIG:
+	case QMAP_DFC_IND:
+	case QMAP_DFC_QUERY:
+	case QMAP_DFC_END_MARKER:
+	case QMAP_DFC_POWERSAVE:
+		rc = dfc_qmap_cmd_handler(skb);
+		break;
+
+	case QMAP_LL_SWITCH:
+	case QMAP_LL_SWITCH_STATUS:
+		rc = ll_qmap_cmd_handler(skb);
+		break;
+
+	case QMAP_DATA_REPORT:
+		rmnet_module_hook_aps_data_report(skb);
+		rc = QMAP_CMD_DONE;
+		break;
+
+	default:
+		if (cmd->cmd_type == QMAP_CMD_REQUEST)
+			rc = QMAP_CMD_UNSUPPORTED;
+	}
+
+	/* Send ack */
+	if (rc != QMAP_CMD_DONE) {
+		if (rc == QMAP_CMD_ACK_INBAND) {
+			cmd->cmd_type = QMAP_CMD_ACK;
+			rmnet_qmap_send(skb, RMNET_CH_DEFAULT, false);
+		} else {
+			cmd->cmd_type = rc;
+			rmnet_qmap_send(skb, RMNET_CH_CTL, false);
+		}
+		rcu_read_unlock();
+		return;
+	}
+
+	rcu_read_unlock();
+
+free_skb:
+	kfree_skb(skb);
+}
+
+static struct rmnet_ctl_client_hooks cb = {
+	.ctl_dl_client_hook = rmnet_qmap_cmd_handler,
+};
+
+int rmnet_qmap_next_txid(void)
+{
+	return atomic_inc_return(&qmap_txid);
+}
+EXPORT_SYMBOL(rmnet_qmap_next_txid);
+
+struct net_device *rmnet_qmap_get_dev(u8 mux_id)
+{
+	return rmnet_get_rmnet_dev(rmnet_port, mux_id);
+}
+
+int rmnet_qmap_init(void *port)
+{
+	if (rmnet_ctl_handle)
+		return 0;
+
+	atomic_set(&qmap_txid, 0);
+	rmnet_port = port;
+	real_data_dev = rmnet_get_real_dev(rmnet_port);
+
+	rmnet_ctl = rmnet_ctl_if();
+	if (!rmnet_ctl) {
+		pr_err("rmnet_ctl module not loaded\n");
+		return -EFAULT;
+	}
+
+	if (rmnet_ctl->reg)
+		rmnet_ctl_handle = rmnet_ctl->reg(&cb);
+
+	if (!rmnet_ctl_handle) {
+		pr_err("Failed to register with rmnet ctl\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void rmnet_qmap_exit(void)
+{
+	if (rmnet_ctl && rmnet_ctl->dereg)
+		rmnet_ctl->dereg(rmnet_ctl_handle);
+
+	rmnet_ctl_handle = NULL;
+	real_data_dev = NULL;
+	rmnet_port = NULL;
+}

+ 68 - 0
qcom/opensource/datarmnet/core/rmnet_qmap.h

@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __RMNET_QMAP_H
+#define __RMNET_QMAP_H
+
+#include "qmi_rmnet_i.h"
+
+#define QMAP_CMD_DONE		-1
+#define QMAP_CMD_ACK_INBAND	-2
+
+#define QMAP_CMD_REQUEST	0
+#define QMAP_CMD_ACK		1
+#define QMAP_CMD_UNSUPPORTED	2
+#define QMAP_CMD_INVALID	3
+
+struct qmap_hdr {
+	u8	cd_pad;
+	u8	mux_id;
+	__be16	pkt_len;
+} __aligned(1);
+
+#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
+
+struct qmap_cmd_hdr {
+	u8	pad_len:6;
+	u8	reserved_bit:1;
+	u8	cd_bit:1;
+	u8	mux_id;
+	__be16	pkt_len;
+	u8	cmd_name;
+	u8	cmd_type:2;
+	u8	reserved:6;
+	u16	reserved2;
+	__be32	tx_id;
+} __aligned(1);
+
+int rmnet_qmap_init(void *port);
+void rmnet_qmap_exit(void);
+int rmnet_qmap_next_txid(void);
+int rmnet_qmap_send(struct sk_buff *skb, u8 ch, bool flush);
+struct net_device *rmnet_qmap_get_dev(u8 mux_id);
+
+#define QMAP_DFC_CONFIG		10
+#define QMAP_DFC_IND		11
+#define QMAP_DFC_QUERY		12
+#define QMAP_DFC_END_MARKER	13
+#define QMAP_DFC_POWERSAVE	14
+int dfc_qmap_cmd_handler(struct sk_buff *skb);
+
+#define QMAP_LL_SWITCH		25
+#define QMAP_LL_SWITCH_STATUS	26
+int ll_qmap_cmd_handler(struct sk_buff *skb);
+
+#define QMAP_DATA_REPORT	34
+
+#endif /* __RMNET_QMAP_H */

+ 101 - 0
qcom/opensource/datarmnet/core/rmnet_qmi.h

@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_QMI_H
+#define _RMNET_QMI_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#define CONFIG_QTI_QMI_RMNET 1
+
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb, u8 ch, bool flush);
+
+#ifdef CONFIG_QTI_QMI_RMNET
+void *rmnet_get_qmi_pt(void *port);
+void *rmnet_get_qos_pt(struct net_device *dev);
+void *rmnet_get_rmnet_port(struct net_device *dev);
+struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id);
+void rmnet_reset_qmi_pt(void *port);
+void rmnet_init_qmi_pt(void *port, void *qmi);
+void rmnet_enable_all_flows(void *port);
+bool rmnet_all_flows_enabled(void *port);
+void rmnet_set_powersave_format(void *port);
+void rmnet_clear_powersave_format(void *port);
+void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
+int rmnet_get_powersave_notif(void *port);
+struct net_device *rmnet_get_real_dev(void *port);
+int rmnet_get_dlmarker_info(void *port);
+void rmnet_prepare_ps_bearers(void *port, u8 *num_bearers, u8 *bearer_id);
+#else
+static inline void *rmnet_get_qmi_pt(void *port)
+{
+	return NULL;
+}
+
+static inline void *rmnet_get_qos_pt(struct net_device *dev)
+{
+	return NULL;
+}
+
+static inline void *rmnet_get_rmnet_port(struct net_device *dev)
+{
+	return NULL;
+}
+
+static inline struct net_device *rmnet_get_rmnet_dev(void *port,
+						     u8 mux_id)
+{
+	return NULL;
+}
+
+static inline void rmnet_reset_qmi_pt(void *port)
+{
+}
+
+static inline void rmnet_init_qmi_pt(void *port, void *qmi)
+{
+}
+
+static inline void rmnet_enable_all_flows(void *port)
+{
+}
+
+static inline bool rmnet_all_flows_enabled(void *port)
+{
+	return true;
+}
+
+static inline void rmnet_set_port_format(void *port)
+{
+}
+
+static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+{
+}
+
+static inline int rmnet_get_powersave_notif(void *port)
+{
+	return 0;
+}
+
+static inline struct net_device *rmnet_get_real_dev(void *port)
+{
+	return NULL;
+}
+
+static inline int rmnet_get_dlmarker_info(void *port)
+{
+	return 0;
+}
+#endif /* CONFIG_QTI_QMI_RMNET */
+#endif /*_RMNET_QMI_H*/

+ 496 - 0
qcom/opensource/datarmnet/core/rmnet_trace.h

@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/version.h>
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rmnet
+#undef TRACE_INCLUDE_PATH
+
+#ifndef RMNET_TRACE_INCLUDE_PATH
+#if defined(CONFIG_RMNET_LA_PLATFORM)
+#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
+#elif defined(__arch_um__)
+#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
+#else
+#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
+#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
+#endif /* RMNET_TRACE_INCLUDE_PATH */
+#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_FILE rmnet_trace
+
+#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RMNET_H
+
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+/*****************************************************************************/
+/* Trace events for rmnet module */
+/*****************************************************************************/
+TRACE_EVENT(rmnet_xmit_skb,
+
+	TP_PROTO(struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__string(dev_name, skb->dev->name)
+		__field(unsigned int, len)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, skb->dev->name);
+		__entry->len = skb->len;
+	),
+
+	TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len)
+);
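+/* The TRACE_EVENT() definition above generates a trace_rmnet_xmit_skb(skb)
+ * call that the transmit path can invoke; this is standard kernel tracepoint
+ * behavior rather than anything specific to this header.
+ */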
+
+DECLARE_EVENT_CLASS
+	(rmnet_mod_template,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	 TP_STRUCT__entry(__field(u8, func)
+			  __field(u8, evt)
+			  __field(u32, uint1)
+			  __field(u32, uint2)
+			  __field(u64, ulong1)
+			  __field(u64, ulong2)
+			  __field(void *, ptr1)
+			  __field(void *, ptr2)
+	 ),
+
+	 TP_fast_assign(__entry->func = func;
+			__entry->evt = evt;
+			__entry->uint1 = uint1;
+			__entry->uint2 = uint2;
+			__entry->ulong1 = ulong1;
+			__entry->ulong2 = ulong2;
+			__entry->ptr1 = ptr1;
+			__entry->ptr2 = ptr2;
+	 ),
+
+TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK",
+	  __entry->func, __entry->evt,
+	  __entry->uint1, __entry->uint2,
+	  __entry->ulong1, __entry->ulong2,
+	  __entry->ptr1, __entry->ptr2)
+)
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_low,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_high,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_err,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+TRACE_EVENT(print_skb_gso,
+
+	TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest,
+		 u16 ip_proto, u16 xport_proto, const char *saddr, const char *daddr),
+
+	TP_ARGS(skb, src, dest, ip_proto, xport_proto, saddr, daddr),
+
+	TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(int, len)
+		__field(int, data_len)
+		__field(__be16, src)
+		__field(__be16, dest)
+		__field(u16, ip_proto)
+		__field(u16, xport_proto)
+		__string(saddr, saddr)
+		__string(daddr, daddr)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ip_proto = ip_proto;
+		__entry->xport_proto = xport_proto;
+		__assign_str(saddr, saddr);
+		__assign_str(daddr, daddr);
+	),
+
+	TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, [%s][%s] src=%s %u dest=%s %u",
+		__entry->skbaddr, __entry->len, __entry->data_len,
+		__entry->ip_proto == htons(ETH_P_IP) ? "IPv4" : "IPv6",
+		__entry->xport_proto == IPPROTO_TCP ? "TCP" : "UDP",
+		__get_str(saddr), be16_to_cpu(__entry->src),
+		__get_str(daddr), be16_to_cpu(__entry->dest))
+);
+
+DECLARE_EVENT_CLASS(print_icmp,
+
+	TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
+		const char *saddr, const char *daddr),
+
+	TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr),
+
+	TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(int, len)
+		__field(u16, ip_proto)
+		__field(u8, type)
+		__field(__be16, sequence)
+		__string(saddr, saddr)
+		__string(daddr, daddr)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__entry->ip_proto = ip_proto;
+		__entry->type = type;
+		__entry->sequence = sequence;
+		__assign_str(saddr, saddr);
+		__assign_str(daddr, daddr);
+	),
+
+	TP_printk("ICMP: skbaddr=%pK, len=%d, [%s] type=%u sequence=%u source=%s dest=%s",
+		__entry->skbaddr, __entry->len,
+		__entry->ip_proto == htons(ETH_P_IP) ? "IPv4" : "IPv6",
+		__entry->type, be16_to_cpu(__entry->sequence), __get_str(saddr),
+		__get_str(daddr))
+);
+
+DEFINE_EVENT
+	(print_icmp, print_icmp_tx,
+
+	TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
+		const char *saddr, const char *daddr),
+
+	TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr)
+);
+
+DEFINE_EVENT
+	(print_icmp, print_icmp_rx,
+
+	TP_PROTO(struct sk_buff *skb, u16 ip_proto, u8 type, __be16 sequence,
+		const char *saddr, const char *daddr),
+
+	TP_ARGS(skb, ip_proto, type, sequence, saddr, daddr)
+);
+
+DECLARE_EVENT_CLASS(print_tcp,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct tcphdr *tp),
+
+	TP_ARGS(skb, saddr, daddr, tp),
+
+	TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(int, len)
+		__string(saddr, saddr)
+		__string(daddr, daddr)
+		__field(__be16, source)
+		__field(__be16, dest)
+		__field(__be32, seq)
+		__field(__be32, ack_seq)
+		__field(u8, syn)
+		__field(u8, ack)
+		__field(u8, fin)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__assign_str(saddr, saddr);
+		__assign_str(daddr, daddr);
+		__entry->source = tp->source;
+		__entry->dest = tp->dest;
+		__entry->seq = tp->seq;
+		__entry->ack_seq = tp->ack_seq;
+		__entry->syn = tp->syn;
+		__entry->ack = tp->ack;
+		__entry->fin = tp->fin;
+	),
+
+	TP_printk("TCP: skbaddr=%pK, len=%d source=%s %u dest=%s %u seq=%u ack_seq=%u syn=%u ack=%u fin=%u",
+		__entry->skbaddr, __entry->len,
+		__get_str(saddr), be16_to_cpu(__entry->source),
+		__get_str(daddr), be16_to_cpu(__entry->dest),
+		be32_to_cpu(__entry->seq), be32_to_cpu(__entry->ack_seq),
+		!!__entry->syn, !!__entry->ack, !!__entry->fin)
+);
+
+DEFINE_EVENT
+	(print_tcp, print_tcp_tx,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct tcphdr *tp),
+
+	TP_ARGS(skb, saddr, daddr, tp)
+);
+
+DEFINE_EVENT
+	(print_tcp, print_tcp_rx,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct tcphdr *tp),
+
+	TP_ARGS(skb, saddr, daddr, tp)
+);
+
+DECLARE_EVENT_CLASS(print_udp,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct udphdr *uh, u16 ip_id),
+
+	TP_ARGS(skb, saddr, daddr, uh, ip_id),
+
+	TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(int, len)
+		__string(saddr, saddr)
+		__string(daddr, daddr)
+		__field(__be16, source)
+		__field(__be16, dest)
+		__field(__be16, ip_id)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__assign_str(saddr, saddr);
+		__assign_str(daddr, daddr);
+		__entry->source = uh->source;
+		__entry->dest = uh->dest;
+		__entry->ip_id = ip_id;
+	),
+
+	TP_printk("UDP: skbaddr=%pK, len=%d source=%s %u dest=%s %u ip_id=%u",
+		__entry->skbaddr, __entry->len,
+		__get_str(saddr), be16_to_cpu(__entry->source),
+		__get_str(daddr), be16_to_cpu(__entry->dest),
+		__entry->ip_id)
+);
+
+DEFINE_EVENT
+	(print_udp, print_udp_tx,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct udphdr *uh, u16 ip_id),
+
+	TP_ARGS(skb, saddr, daddr, uh, ip_id)
+);
+
+DEFINE_EVENT
+	(print_udp, print_udp_rx,
+
+	TP_PROTO(struct sk_buff *skb, const char *saddr, const char *daddr,
+		 struct udphdr *uh, u16 ip_id),
+
+	TP_ARGS(skb, saddr, daddr, uh, ip_id)
+);
+
+TRACE_EVENT(print_pfn,
+
+	TP_PROTO(struct sk_buff *skb, unsigned long *pfn_list, int num_elements),
+
+	TP_ARGS(skb, pfn_list, num_elements),
+
+	TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(int, num_elements)
+		__dynamic_array(unsigned long, pfn_list, num_elements)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->num_elements = num_elements;
+		memcpy(__get_dynamic_array(pfn_list), pfn_list,
+		       num_elements * sizeof(*pfn_list));
+	),
+
+	TP_printk("skbaddr=%pK count=%d pfn=%s",
+		  __entry->skbaddr,
+		  __entry->num_elements,
+		  __print_array(__get_dynamic_array(pfn_list),
+				__entry->num_elements,
+				sizeof(unsigned long))
+	)
+);
+
+/*****************************************************************************/
+/* Trace events for rmnet_perf module */
+/*****************************************************************************/
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_perf_low,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_perf_high,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_perf_err,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+
+);
+
+/*****************************************************************************/
+/* Trace events for rmnet_shs module */
+/*****************************************************************************/
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_low,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_high,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_err,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_wq_low,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_wq_high,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DEFINE_EVENT
+	(rmnet_mod_template, rmnet_shs_wq_err,
+
+	 TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		  u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	 TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2)
+);
+
+DECLARE_EVENT_CLASS
+	(rmnet_freq_template,
+
+	 TP_PROTO(u8 core, u32 newfreq),
+
+	 TP_ARGS(core, newfreq),
+
+	 TP_STRUCT__entry(__field(u8, core)
+			  __field(u32, newfreq)
+	 ),
+
+	 TP_fast_assign(__entry->core = core;
+			__entry->newfreq = newfreq;
+	 ),
+
+TP_printk("freq policy core:%u freq floor :%u",
+	  __entry->core, __entry->newfreq)
+
+);
+
+DEFINE_EVENT
+	(rmnet_freq_template, rmnet_freq_boost,
+
+	 TP_PROTO(u8 core, u32 newfreq),
+
+	 TP_ARGS(core, newfreq)
+);
+
+DEFINE_EVENT
+	(rmnet_freq_template, rmnet_freq_reset,
+
+	 TP_PROTO(u8 core, u32 newfreq),
+
+	 TP_ARGS(core, newfreq)
+);
+
+TRACE_EVENT
+	(rmnet_freq_update,
+
+	 TP_PROTO(u8 core, u32 lowfreq, u32 highfreq),
+
+	 TP_ARGS(core, lowfreq, highfreq),
+
+	 TP_STRUCT__entry(__field(u8, core)
+			  __field(u32, lowfreq)
+			  __field(u32, highfreq)
+	 ),
+
+	 TP_fast_assign(__entry->core = core;
+			__entry->lowfreq = lowfreq;
+			__entry->highfreq = highfreq;
+
+	 ),
+
+TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u",
+	  __entry->core, __entry->lowfreq, __entry->highfreq)
+);
+#endif /* _TRACE_RMNET_H */
+
+#include <trace/define_trace.h>

+ 811 - 0
qcom/opensource/datarmnet/core/rmnet_vnd.c

@@ -0,0 +1,811 @@
+/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data virtual network driver
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/inet.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/ethtool.h>
+#include <net/pkt_sched.h>
+#include <net/ipv6.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_vnd.h"
+#include "rmnet_genl.h"
+#include "rmnet_ll.h"
+#include "rmnet_ctl.h"
+
+#include "qmi_rmnet.h"
+#include "rmnet_qmi.h"
+#include "rmnet_trace.h"
+
+typedef void (*rmnet_perf_tether_egress_hook_t)(struct sk_buff *skb);
+rmnet_perf_tether_egress_hook_t rmnet_perf_tether_egress_hook __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_tether_egress_hook);
+
+typedef void (*rmnet_perf_egress_hook1_t)(struct sk_buff *skb);
+rmnet_perf_egress_hook1_t rmnet_perf_egress_hook1 __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_egress_hook1);
+
+typedef void (*rmnet_aps_pre_queue_t)(struct net_device *dev,
+				      struct sk_buff *skb);
+rmnet_aps_pre_queue_t rmnet_aps_pre_queue __read_mostly;
+EXPORT_SYMBOL(rmnet_aps_pre_queue);
+
+typedef int (*rmnet_aps_post_queue_t)(struct net_device *dev,
+				      struct sk_buff *skb);
+rmnet_aps_post_queue_t rmnet_aps_post_queue __read_mostly;
+EXPORT_SYMBOL(rmnet_aps_post_queue);
+
+typedef void (*rmnet_wlan_ll_tuple_hook_t)(struct sk_buff *skb);
+rmnet_wlan_ll_tuple_hook_t rmnet_wlan_ll_tuple_hook __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_wlan_ll_tuple_hook);
+
+/* RX/TX Fixup */
+
+void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb_len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb_len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+/* Network Device Operations */
+
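+/* Egress entry point for the virtual device. Runs the APS post-queue,
+ * perf tether and WLAN LL hooks, drops packets flagged by the DFC flow
+ * state, and software-segments GSO packets that either take the
+ * low-latency channel or need re-chunking against the real device's
+ * gso_max_size.
+ */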
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+	int ip_type;
+	u32 mark;
+	unsigned int len;
+	rmnet_perf_tether_egress_hook_t rmnet_perf_tether_egress;
+	rmnet_aps_post_queue_t aps_post_queue;
+	rmnet_wlan_ll_tuple_hook_t rmnet_wlan_ll_tuple;
+	bool low_latency = false;
+	bool need_to_drop = false;
+
+	priv = netdev_priv(dev);
+
+	aps_post_queue = rcu_dereference(rmnet_aps_post_queue);
+	if (aps_post_queue)
+		if (unlikely(aps_post_queue(dev, skb))) {
+			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+			kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
+	if (priv->real_dev) {
+		ip_type = (ip_hdr(skb)->version == 4) ?
+					AF_INET : AF_INET6;
+		mark = skb->mark;
+		len = skb->len;
+		trace_rmnet_xmit_skb(skb);
+		rmnet_perf_tether_egress = rcu_dereference(rmnet_perf_tether_egress_hook);
+		if (rmnet_perf_tether_egress) {
+			rmnet_perf_tether_egress(skb);
+		}
+
+		rmnet_wlan_ll_tuple = rcu_dereference(rmnet_wlan_ll_tuple_hook);
+		if (rmnet_wlan_ll_tuple) {
+			rmnet_wlan_ll_tuple(skb);
+		}
+
+		qmi_rmnet_get_flow_state(dev, skb, &need_to_drop, &low_latency);
+		if (unlikely(need_to_drop)) {
+			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+			kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
+		if (RMNET_APS_LLC(skb->priority))
+			low_latency = true;
+
+		if ((low_latency || RMNET_APS_LLB(skb->priority)) &&
+		    skb_is_gso(skb)) {
+			netdev_features_t features;
+			struct sk_buff *segs, *tmp;
+
+			features = dev->features & ~NETIF_F_GSO_MASK;
+			segs = skb_gso_segment(skb, features);
+			if (IS_ERR_OR_NULL(segs)) {
+				this_cpu_add(priv->pcpu_stats->stats.tx_drops,
+					     skb_shinfo(skb)->gso_segs);
+				priv->stats.ll_tso_errs++;
+				kfree_skb(skb);
+				return NETDEV_TX_OK;
+			}
+
+			consume_skb(skb);
+			for (skb = segs; skb; skb = tmp) {
+				tmp = skb->next;
+				skb->dev = dev;
+				priv->stats.ll_tso_segs++;
+				skb_mark_not_on_list(skb);
+				rmnet_egress_handler(skb, low_latency);
+			}
+		} else if (!low_latency && skb_is_gso(skb)) {
+			u64 gso_limit = priv->real_dev->gso_max_size ? : 1;
+			u16 gso_goal = 0;
+			netdev_features_t features = NETIF_F_SG;
+			u16 orig_gso_size = skb_shinfo(skb)->gso_size;
+			unsigned int orig_gso_type = skb_shinfo(skb)->gso_type;
+			struct sk_buff *segs, *tmp;
+
+			features |=  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+			if (skb->len < gso_limit || gso_limit > 65535) {
+				priv->stats.tso_segment_skip++;
+				rmnet_egress_handler(skb, low_latency);
+			} else {
+				do_div(gso_limit, skb_shinfo(skb)->gso_size);
+				gso_goal = gso_limit * skb_shinfo(skb)->gso_size;
+				skb_shinfo(skb)->gso_size = gso_goal;
+
+				segs = __skb_gso_segment(skb, features, false);
+				if (IS_ERR_OR_NULL(segs)) {
+					skb_shinfo(skb)->gso_size = orig_gso_size;
+					skb_shinfo(skb)->gso_type = orig_gso_type;
+
+					priv->stats.tso_segment_fail++;
+					rmnet_egress_handler(skb, low_latency);
+				} else {
+					consume_skb(skb);
+
+					for (skb = segs; skb; skb = tmp) {
+						tmp = skb->next;
+						skb->dev = dev;
+
+						skb_shinfo(skb)->gso_size = orig_gso_size;
+						skb_shinfo(skb)->gso_type = orig_gso_type;
+
+						priv->stats.tso_segment_success++;
+						skb_mark_not_on_list(skb);
+						rmnet_egress_handler(skb, low_latency);
+					}
+				}
+			}
+		} else {
+			rmnet_egress_handler(skb, low_latency);
+		}
+		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
+		qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev), NULL, NULL);
+	} else {
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+		kfree_skb(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static int rmnet_vnd_get_iflink(const struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	return priv->real_dev->ifindex;
+}
+
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	void *qos;
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+
+	qos = rcu_dereference(priv->qos_info);
+	RCU_INIT_POINTER(priv->qos_info, NULL);
+	qmi_rmnet_qos_exit_pre(qos);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
+
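+/* Queue selection also doubles as a tracing point: when the corresponding
+ * tracepoints are enabled it emits ICMP/TCP/UDP/GSO egress traces, then
+ * asks qmi_rmnet_get_queue() for the TX queue and runs the APS pre-queue
+ * hook.
+ */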
+static u16 rmnet_vnd_select_queue(struct net_device *dev,
+				  struct sk_buff *skb,
+				  struct net_device *sb_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	u64 boost_period = 0;
+	int boost_trigger = 0;
+	int txq = 0;
+	rmnet_perf_egress_hook1_t rmnet_perf_egress1;
+	rmnet_aps_pre_queue_t aps_pre_queue;
+
+	rmnet_perf_egress1 = rcu_dereference(rmnet_perf_egress_hook1);
+	if (rmnet_perf_egress1) {
+		rmnet_perf_egress1(skb);
+	}
+
+	if (trace_print_icmp_tx_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+		u16 ip_proto = 0;
+		__be16 sequence = 0;
+		u8 type = 0;
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
+				goto skip_trace_print_icmp_tx;
+
+			if (icmp_hdr(skb)->type != ICMP_ECHOREPLY &&
+			    icmp_hdr(skb)->type != ICMP_ECHO)
+				goto skip_trace_print_icmp_tx;
+
+			ip_proto = htons(ETH_P_IP);
+			type = icmp_hdr(skb)->type;
+			sequence = icmp_hdr(skb)->un.echo.sequence;
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
+		}
+
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(skb)->nexthdr != NEXTHDR_ICMP)
+				goto skip_trace_print_icmp_tx;
+
+			if (icmp6_hdr(skb)->icmp6_type != ICMPV6_ECHO_REQUEST &&
+			    icmp6_hdr(skb)->icmp6_type != ICMPV6_ECHO_REPLY)
+				goto skip_trace_print_icmp_tx;
+
+			ip_proto = htons(ETH_P_IPV6);
+			type = icmp6_hdr(skb)->icmp6_type;
+			sequence = icmp6_hdr(skb)->icmp6_sequence;
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
+		}
+
+		if (!ip_proto)
+			goto skip_trace_print_icmp_tx;
+
+		trace_print_icmp_tx(skb, ip_proto, type, sequence, saddr, daddr);
+	}
+
+skip_trace_print_icmp_tx:
+	if (trace_print_tcp_tx_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(skb)->protocol != IPPROTO_TCP)
+				goto skip_trace_print_tcp_tx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
+		}
+
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)
+				goto skip_trace_print_tcp_tx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
+		}
+
+		trace_print_tcp_tx(skb, saddr, daddr, tcp_hdr(skb));
+	}
+
+skip_trace_print_tcp_tx:
+	if (trace_print_udp_tx_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+		u16 ip_id = 0;
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(skb)->protocol != IPPROTO_UDP)
+				goto skip_trace_print_udp_tx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
+			ip_id = ntohs(ip_hdr(skb)->id);
+		}
+
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
+				goto skip_trace_print_udp_tx;
+
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
+		}
+
+		trace_print_udp_tx(skb, saddr, daddr, udp_hdr(skb), ip_id);
+	}
+
+skip_trace_print_udp_tx:
+	if (trace_print_skb_gso_enabled()) {
+		char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];
+		u16 ip_proto = 0, xport_proto = 0;
+
+		if (!skb_shinfo(skb)->gso_size)
+			goto skip_trace;
+
+		memset(saddr, 0, INET6_ADDRSTRLEN);
+		memset(daddr, 0, INET6_ADDRSTRLEN);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+				xport_proto = IPPROTO_TCP;
+			else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+				xport_proto = IPPROTO_UDP;
+			else
+				goto skip_trace;
+
+			ip_proto = htons(ETH_P_IP);
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI4", &ip_hdr(skb)->daddr);
+		}
+
+		if (skb->protocol == htons(ETH_P_IPV6)) {
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				xport_proto = IPPROTO_TCP;
+			else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+				xport_proto = IPPROTO_UDP;
+			else
+				goto skip_trace;
+
+			ip_proto = htons(ETH_P_IPV6);
+			snprintf(saddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->saddr);
+			snprintf(daddr, INET6_ADDRSTRLEN, "%pI6", &ipv6_hdr(skb)->daddr);
+		}
+
+		trace_print_skb_gso(skb,
+				    xport_proto == IPPROTO_TCP ? tcp_hdr(skb)->source :
+								 udp_hdr(skb)->source,
+				    xport_proto == IPPROTO_TCP ? tcp_hdr(skb)->dest :
+								 udp_hdr(skb)->dest,
+				    ip_proto, xport_proto, saddr, daddr);
+	}
+
+skip_trace:
+	if (priv->real_dev)
+		txq = qmi_rmnet_get_queue(dev, skb);
+
+	if (rmnet_core_userspace_connected) {
+		rmnet_update_pid_and_check_boost(task_pid_nr(current),
+						 skb->len,
+						 &boost_trigger,
+						 &boost_period);
+
+		if (boost_trigger)
+			(void) boost_period;
+	}
+
+	aps_pre_queue = rcu_dereference(rmnet_aps_pre_queue);
+	if (aps_pre_queue)
+		aps_pre_queue(dev, skb);
+
+	return (txq < dev->real_num_tx_queues) ? txq : 0;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	.ndo_get_iflink = rmnet_vnd_get_iflink,
+	.ndo_add_slave  = rmnet_add_bridge,
+	.ndo_del_slave  = rmnet_del_bridge,
+	.ndo_init       = rmnet_vnd_init,
+	.ndo_uninit     = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
+	.ndo_select_queue = rmnet_vnd_select_queue,
+};
+
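+/* ethtool stat names. rmnet_get_ethtool_stats() copies the backing stats
+ * structures verbatim, so the ordering of these tables is expected to
+ * mirror the corresponding struct layouts.
+ */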
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"Checksum ok",
+	"Checksum valid bit not set",
+	"Checksum validation failed",
+	"Checksum error bad buffer",
+	"Checksum error bad ip version",
+	"Checksum error bad transport",
+	"Checksum skipped on ip fragment",
+	"Checksum skipped",
+	"Checksum computed in software",
+	"Checksum computed in hardware",
+	"Coalescing packets received",
+	"Coalesced packets",
+	"Coalescing header NLO errors",
+	"Coalescing header pcount errors",
+	"Coalescing checksum errors",
+	"Coalescing packet reconstructs",
+	"Coalescing IP version invalid",
+	"Coalescing L4 header invalid",
+	"Coalescing close Non-coalescable",
+	"Coalescing close L3 mismatch",
+	"Coalescing close L4 mismatch",
+	"Coalescing close HW NLO limit",
+	"Coalescing close HW packet limit",
+	"Coalescing close HW byte limit",
+	"Coalescing close HW time limit",
+	"Coalescing close HW eviction",
+	"Coalescing close Coalescable",
+	"Coalescing packets over VEID0",
+	"Coalescing packets over VEID1",
+	"Coalescing packets over VEID2",
+	"Coalescing packets over VEID3",
+	"Coalescing packets over VEID4",
+	"Coalescing packets over VEID5",
+	"Coalescing packets over VEID6",
+	"Coalescing packets over VEID7",
+	"Coalescing packets over VEID8",
+	"Coalescing packets over VEID9",
+	"Coalescing packets over VEID10",
+	"Coalescing packets over VEID11",
+	"Coalescing packets over VEID12",
+	"Coalescing packets over VEID13",
+	"Coalescing packets over VEID14",
+	"Coalescing packets over VEID15",
+	"Coalescing TCP frames",
+	"Coalescing TCP bytes",
+	"Coalescing UDP frames",
+	"Coalescing UDP bytes",
+	"Uplink priority packets",
+	"TSO packets",
+	"TSO packets arriving incorrectly",
+	"TSO segment success",
+	"TSO segment fail",
+	"TSO segment skip",
+	"LL TSO segment success",
+	"LL TSO segment fail",
+	"APS priority packets",
+};
+
+static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"MAP Cmd last version",
+	"MAP Cmd last ep id",
+	"MAP Cmd last transaction id",
+	"DL header last seen sequence",
+	"DL header last seen bytes",
+	"DL header last seen packets",
+	"DL header last seen flows",
+	"DL header pkts received",
+	"DL header total bytes received",
+	"DL header total pkts received",
+	"DL trailer last seen sequence",
+	"DL trailer pkts received",
+	"UL agg reuse",
+	"UL agg alloc",
+	"DL chaining [0-10)",
+	"DL chaining [10-20)",
+	"DL chaining [20-30)",
+	"DL chaining [30-40)",
+	"DL chaining [40-50)",
+	"DL chaining [50-60)",
+	"DL chaining >= 60",
+	"DL chaining frags [0-1]",
+	"DL chaining frags [2-3]",
+	"DL chaining frags [4-7]",
+	"DL chaining frags [8-11]",
+	"DL chaining frags [12-15]",
+	"DL chaining frags = 16",
+	"PB Byte Marker Count",
+};
+
+static const char rmnet_ll_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"LL TX queues",
+	"LL TX queue errors",
+	"LL TX completions",
+	"LL TX completion errors",
+	"LL RX queues",
+	"LL RX queue errors",
+	"LL RX status errors",
+	"LL RX empty transfers",
+	"LL RX OOM errors",
+	"LL RX packets",
+	"LL RX temp buffer allocations",
+	"LL TX disabled",
+	"LL TX enabled",
+	"LL TX FC queued",
+	"LL TX FC sent",
+	"LL TX FC err",
+};
+
+static const char rmnet_qmap_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"QMAP RX success",
+	"QMAP RX errors",
+	"QMAP TX queued",
+	"QMAP TX errors",
+	"QMAP TX complete (MHI)",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	size_t off = 0;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &rmnet_gstrings_stats,
+		       sizeof(rmnet_gstrings_stats));
+		off += sizeof(rmnet_gstrings_stats);
+		memcpy(buf + off,
+		       &rmnet_port_gstrings_stats,
+		       sizeof(rmnet_port_gstrings_stats));
+		off += sizeof(rmnet_port_gstrings_stats);
+		memcpy(buf + off, &rmnet_ll_gstrings_stats,
+		       sizeof(rmnet_ll_gstrings_stats));
+		off += sizeof(rmnet_ll_gstrings_stats);
+		memcpy(buf + off, &rmnet_qmap_gstrings_stats,
+		       sizeof(rmnet_qmap_gstrings_stats));
+		break;
+	}
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(rmnet_gstrings_stats) +
+		       ARRAY_SIZE(rmnet_port_gstrings_stats) +
+		       ARRAY_SIZE(rmnet_ll_gstrings_stats) +
+		       ARRAY_SIZE(rmnet_qmap_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
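+/* Stats are laid out contiguously: per-device, per-port, low-latency
+ * channel, then the rmnet_ctl QMAP counters fetched via
+ * rmnet_ctl_get_stats().
+ */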
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_priv_stats *st = &priv->stats;
+	struct rmnet_port_priv_stats *stp;
+	struct rmnet_ll_stats *llp;
+	struct rmnet_port *port;
+	size_t off = 0;
+	u64 qmap_s[ARRAY_SIZE(rmnet_qmap_gstrings_stats)];
+
+	port = rmnet_get_port(priv->real_dev);
+
+	if (!data || !port)
+		return;
+
+	stp = &port->stats;
+	llp = rmnet_ll_get_stats();
+
+	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+	off += ARRAY_SIZE(rmnet_gstrings_stats);
+	memcpy(data + off, stp,
+	       ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
+	off += ARRAY_SIZE(rmnet_port_gstrings_stats);
+	memcpy(data + off, llp,
+	       ARRAY_SIZE(rmnet_ll_gstrings_stats) * sizeof(u64));
+
+	off += ARRAY_SIZE(rmnet_ll_gstrings_stats);
+	memset(qmap_s, 0, sizeof(qmap_s));
+	rmnet_ctl_get_stats(qmap_s, ARRAY_SIZE(rmnet_qmap_gstrings_stats));
+	memcpy(data + off, qmap_s,
+	       ARRAY_SIZE(rmnet_qmap_gstrings_stats) * sizeof(u64));
+}
+
+static int rmnet_stats_reset(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_port_priv_stats *stp;
+	struct rmnet_priv_stats *st;
+	struct rmnet_port *port;
+
+	port = rmnet_get_port(priv->real_dev);
+	if (!port)
+		return -EINVAL;
+
+	stp = &port->stats;
+
+	memset(stp, 0, sizeof(*stp));
+
+	st = &priv->stats;
+
+	memset(st, 0, sizeof(*st));
+
+	return 0;
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+	.get_ethtool_stats = rmnet_get_ethtool_stats,
+	.get_strings = rmnet_get_strings,
+	.get_sset_count = rmnet_get_sset_count,
+	.nway_reset = rmnet_stats_reset,
+};
+
+/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+	eth_random_addr(rmnet_dev->perm_addr);
+	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL;  /* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	rmnet_dev->needs_free_netdev = true;
+	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+}
+
+/* Exposed API */
+
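+/* Registers the virtual device, claims the mux ID on the endpoint and
+ * enables the offload feature flags; per-device QoS state is set up via
+ * qmi_rmnet_qos_init().
+ */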
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+		      struct rmnet_port *port,
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep)
+{
+	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+	int rc;
+
+	if (ep->egress_dev)
+		return -EINVAL;
+
+	if (rmnet_get_endpoint(port, id))
+		return -EBUSY;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	rmnet_dev->hw_features |= NETIF_F_SG;
+	rmnet_dev->hw_features |= NETIF_F_GRO_HW;
+	rmnet_dev->hw_features |= NETIF_F_GSO_UDP_L4;
+	rmnet_dev->hw_features |= NETIF_F_ALL_TSO;
+
+	priv->real_dev = real_dev;
+
+	rmnet_dev->gso_max_size = 65535;
+
+	rc = register_netdevice(rmnet_dev);
+	if (!rc) {
+		ep->egress_dev = rmnet_dev;
+		ep->mux_id = id;
+		port->nr_rmnet_devs++;
+
+		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
+
+		priv->mux_id = id;
+		rcu_assign_pointer(priv->qos_info,
+			qmi_rmnet_qos_init(real_dev, rmnet_dev, id));
+
+		netdev_dbg(rmnet_dev, "rmnet dev created\n");
+	}
+
+	return rc;
+}
+
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep)
+{
+	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
+		return -EINVAL;
+
+	ep->egress_dev = NULL;
+	port->nr_rmnet_devs--;
+	return 0;
+}
+
+u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
+{
+	struct rmnet_priv *priv;
+
+	priv = netdev_priv(rmnet_dev);
+	return priv->mux_id;
+}
+
+int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
+{
+	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
+	/* Although we expect a similar number of enable and disable
+	 * commands, optimize for the disable path, since it is more
+	 * latency sensitive than enable.
+	 */
+	if (unlikely(enable))
+		netif_wake_queue(rmnet_dev);
+	else
+		netif_stop_queue(rmnet_dev);
+
+	return 0;
+}
+
+void rmnet_vnd_reset_mac_addr(struct net_device *dev)
+{
+	if (dev->netdev_ops != &rmnet_vnd_ops)
+		return;
+
+	eth_random_addr(dev->perm_addr);
+}

+ 31 - 0
qcom/opensource/datarmnet/core/rmnet_vnd.h

@@ -0,0 +1,31 @@
+/* Copyright (c) 2013-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Virtual Network Device APIs
+ *
+ */
+
+#ifndef _RMNET_VND_H_
+#define _RMNET_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+		      struct rmnet_port *port,
+		      struct net_device *real_dev,
+		      struct rmnet_endpoint *ep);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+		      struct rmnet_endpoint *ep);
+void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
+void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
+u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
+void rmnet_vnd_setup(struct net_device *dev);
+void rmnet_vnd_reset_mac_addr(struct net_device *dev);
+#endif /* _RMNET_VND_H_ */

+ 92 - 0
qcom/opensource/datarmnet/core/wda.h

@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018,2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/version.h>
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wda
+#undef TRACE_INCLUDE_PATH
+
+#ifndef RMNET_TRACE_INCLUDE_PATH
+#if defined(CONFIG_RMNET_LA_PLATFORM)
+#ifdef CONFIG_ARCH_KHAJE
+#define RMNET_TRACE_INCLUDE_PATH ../../../../../vendor/qcom/opensource/datarmnet/core
+#else
+#define RMNET_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/datarmnet/core
+#endif /* CONFIG_ARCH_KHAJE */
+#elif defined(__arch_um__)
+#define RMNET_TRACE_INCLUDE_PATH ../../datarmnet/core
+#else
+#define RMNET_TRACE_INCLUDE_PATH ../../../../../../../datarmnet/core
+#endif /* defined(CONFIG_RMNET_LA_PLATFORM) */
+#endif /* RMNET_TRACE_INCLUDE_PATH */
+#define TRACE_INCLUDE_PATH RMNET_TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_FILE wda
+
+#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WDA_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(wda_set_powersave_mode,
+
+	TP_PROTO(int enable),
+
+	TP_ARGS(enable),
+
+	TP_STRUCT__entry(
+		__field(int, enable)
+	),
+
+	TP_fast_assign(
+		__entry->enable = enable;
+	),
+
+	TP_printk("set powersave mode to %s",
+		__entry->enable ? "enable" : "disable")
+);
+
+TRACE_EVENT(wda_client_state_up,
+
+	TP_PROTO(u32 instance, u32 ep_type, u32 iface),
+
+	TP_ARGS(instance, ep_type, iface),
+
+	TP_STRUCT__entry(
+		__field(u32, instance)
+		__field(u32, ep_type)
+		__field(u32, iface)
+	),
+
+	TP_fast_assign(
+		__entry->instance = instance;
+		__entry->ep_type = ep_type;
+		__entry->iface = iface;
+	),
+
+	TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
+		__entry->instance, __entry->ep_type, __entry->iface)
+);
+
+TRACE_EVENT(wda_client_state_down,
+
+	TP_PROTO(int from_cb),
+
+	TP_ARGS(from_cb),
+
+	TP_STRUCT__entry(
+		__field(int, from_cb)
+	),
+
+	TP_fast_assign(
+		__entry->from_cb = from_cb;
+	),
+
+	TP_printk("Client: Connection with WDA lost Exit by callback %d",
+		  __entry->from_cb)
+);
+
+#endif /* _TRACE_WDA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>

+ 582 - 0
qcom/opensource/datarmnet/core/wda_qmi.c

@@ -0,0 +1,582 @@
+/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/soc/qcom/qmi.h>
+#include "rmnet_qmi.h"
+#define CREATE_TRACE_POINTS
+#include "wda.h"
+#undef CREATE_TRACE_POINTS
+#include "qmi_rmnet_i.h"
+
+struct wda_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *wda_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+	struct svc_info svc;
+	int restart_state;
+};
+
+static void wda_svc_config(struct work_struct *work);
+/* **************************************************** */
+#define WDA_SERVICE_ID_V01 0x1A
+#define WDA_SERVICE_VERS_V01 0x01
+#define WDA_TIMEOUT_JF  msecs_to_jiffies(1000)
+
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14
+
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 48
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7
+
+enum wda_powersave_config_mask_enum_v01 {
+	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
+	WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
+	WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
+	WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
+	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct wda_set_powersave_config_req_msg_v01 {
+	/* Mandatory */
+	struct data_ep_id_type_v01 ep_id;
+	/* Optional */
+	uint8_t req_data_cfg_valid;
+	enum wda_powersave_config_mask_enum_v01 req_data_cfg;
+};
+
+struct wda_set_powersave_config_resp_msg_v01 {
+	/* Mandatory */
+	struct qmi_response_type_v01 resp;
+	/* Optional */
+	uint8_t data_cfg_valid;
+	enum wda_powersave_config_mask_enum_v01 data_cfg;
+};
+
+struct wda_set_powersave_mode_req_msg_v01 {
+	/* Mandatory */
+	uint8_t powersave_control_flag;
+	/* Optional */
+	uint8_t allow_dfc_notify_valid;
+	uint8_t allow_dfc_notify;
+	uint8_t allow_bearer_id_list_valid;
+	uint8_t allow_bearer_id_list_len;
+	uint8_t allow_bearer_id_list[PS_MAX_BEARERS];
+	uint8_t auto_shut_allow_bearer_valid;
+	uint8_t auto_shut_allow_bearer;
+};
+
+struct wda_set_powersave_mode_resp_msg_v01 {
+	/* Mandatory */
+	struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct data_ep_id_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				ep_id),
+		.ei_array	= data_ep_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				req_data_cfg_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum
+					 wda_powersave_config_mask_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				req_data_cfg),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   data_cfg_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum
+					 wda_powersave_config_mask_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   data_cfg),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   powersave_control_flag),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   allow_dfc_notify_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   allow_dfc_notify),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   allow_bearer_id_list_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   allow_bearer_id_list_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= PS_MAX_BEARERS,
+		.elem_size	= sizeof(u8),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   allow_bearer_id_list),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   auto_shut_allow_bearer_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   auto_shut_allow_bearer),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
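+/* Sends QMI_WDA_SET_POWERSAVE_MODE_REQ. When enabling powersave with a
+ * valid bearer list, the optional allow_dfc_notify, allow_bearer_id_list
+ * and auto_shut_allow_bearer TLVs are populated as well.
+ */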
+static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable,
+				      u8 num_bearers, u8 *bearer_id)
+{
+	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
+	struct wda_set_powersave_mode_resp_msg_v01 *resp;
+	struct wda_set_powersave_mode_req_msg_v01  *req;
+	struct qmi_txn txn;
+	int ret;
+
+	if (!data || !data->rmnet_port)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(&data->handle, &txn,
+			   wda_set_powersave_mode_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->powersave_control_flag = enable;
+
+	if (enable && num_bearers && bearer_id &&
+	    num_bearers <= PS_MAX_BEARERS) {
+		req->allow_dfc_notify_valid = 1;
+		req->allow_dfc_notify = 1;
+
+		req->allow_bearer_id_list_valid = 1;
+		req->allow_bearer_id_list_len = num_bearers;
+		memcpy(req->allow_bearer_id_list, bearer_id, num_bearers);
+
+		req->auto_shut_allow_bearer_valid = 1;
+		req->auto_shut_allow_bearer = 1;
+	}
+
+	ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
+			QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
+			QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
+			wda_set_powersave_mode_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
+					int dl_marker)
+{
+	struct wda_qmi_data *data = container_of(wda_handle,
+						 struct wda_qmi_data, handle);
+	struct wda_set_powersave_config_resp_msg_v01 *resp;
+	struct wda_set_powersave_config_req_msg_v01  *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(wda_handle, &txn,
+			   wda_set_powersave_config_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->ep_id.ep_type = data->svc.ep_type;
+	req->ep_id.iface_id = data->svc.iface_id;
+	req->req_data_cfg_valid = 1;
+	req->req_data_cfg = dl_marker ? WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 :
+					WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01;
+	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
+			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
+			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
+			wda_set_powersave_config_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, error: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
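+/* Worker run when the WDA service arrives: reads the DL marker capability
+ * under RTNL, sends the powersave config request, then publishes the
+ * client handle to the qmi_info. The rtnl_trylock() loops let the worker
+ * bail out early if a restart is in progress.
+ */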
+static void wda_svc_config(struct work_struct *work)
+{
+	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
+						 svc_arrive);
+	struct qmi_info *qmi;
+	int rc, dl_marker = 0;
+
+	while (!rtnl_trylock()) {
+		if (!data->restart_state)
+			cond_resched();
+		else
+			return;
+	}
+
+	dl_marker = rmnet_get_dlmarker_info(data->rmnet_port);
+	rtnl_unlock();
+
+	if (data->restart_state == 1)
+		return;
+
+	rc = wda_set_powersave_config_req(&data->handle, dl_marker);
+	if (rc < 0) {
+		pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+		return;
+	}
+
+	if (data->restart_state == 1)
+		return;
+	while (!rtnl_trylock()) {
+		if (!data->restart_state)
+			cond_resched();
+		else
+			return;
+	}
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi) {
+		rtnl_unlock();
+		return;
+	}
+
+	qmi->wda_pending = NULL;
+	qmi->wda_client = (void *)data;
+	trace_wda_client_state_up(data->svc.instance,
+				  data->svc.ep_type,
+				  data->svc.iface_id);
+
+	rtnl_unlock();
+
+	pr_info("Connection established with the WDA Service, DL Marker %s\n",
+		dl_marker ? "enabled" : "disabled");
+}
+
+static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
+						 handle);
+
+	data->ssctl.sq_family = AF_QIPCRTR;
+	data->ssctl.sq_node = svc->node;
+	data->ssctl.sq_port = svc->port;
+
+	queue_work(data->wda_wq, &data->svc_arrive);
+
+	return 0;
+}
+
+static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
+						 handle);
+
+	if (!data)
+		pr_info("%s() data is null\n", __func__);
+}
+
+static struct qmi_ops server_ops = {
+	.new_server = wda_svc_arrive,
+	.del_server = wda_svc_exit,
+};
+
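+/* Creates the WDA QMI handle and registers a lookup for the requested
+ * service instance. The handle is parked in qmi->wda_pending until
+ * wda_svc_config() promotes it to qmi->wda_client.
+ */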
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
+{
+	struct wda_qmi_data *data;
+	int rc = -ENOMEM;
+
+	if (!port || !qmi)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->wda_wq = create_singlethread_workqueue("wda_wq");
+	if (!data->wda_wq) {
+		pr_err("%s Could not create workqueue\n", __func__);
+		goto err0;
+	}
+
+	data->rmnet_port = port;
+	data->restart_state = 0;
+	memcpy(&data->svc, psvc, sizeof(data->svc));
+	INIT_WORK(&data->svc_arrive, wda_svc_config);
+
+	rc = qmi_handle_init(&data->handle,
+			     QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN,
+			     &server_ops, NULL);
+	if (rc < 0) {
+		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
+		goto err1;
+	}
+
+	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
+			    WDA_SERVICE_VERS_V01, psvc->instance);
+	if (rc < 0) {
+		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
+		goto err2;
+	}
+
+	qmi->wda_pending = (void *)data;
+	return 0;
+
+err2:
+	qmi_handle_release(&data->handle);
+err1:
+	destroy_workqueue(data->wda_wq);
+err0:
+	kfree(data);
+	return rc;
+}
+
+void wda_qmi_client_exit(void *wda_data)
+{
+	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
+
+	if (!data) {
+		pr_info("%s() data is null\n", __func__);
+		return;
+	}
+
+	data->restart_state = 1;
+	trace_wda_client_state_down(0);
+	destroy_workqueue(data->wda_wq);
+	kfree(data);
+}
+
+int wda_set_powersave_mode(void *wda_data, uint8_t enable, u8 num_bearers,
+			   u8 *bearer_id)
+{
+	trace_wda_set_powersave_mode(enable);
+	return wda_set_powersave_mode_req(wda_data, enable, num_bearers,
+					  bearer_id);
+}
+
+void wda_qmi_client_release(void *wda_data)
+{
+	if (!wda_data)
+		return;
+	qmi_handle_release(&((struct wda_qmi_data *)wda_data)->handle);
+}

+ 25 - 0
qcom/opensource/datarmnet/datarmnet_dlkm_vendor_board.mk

@@ -0,0 +1,25 @@
+TARGET_DATARMNET_ENABLE := false
+
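+# DLKM builds are normally gated by TARGET_KERNEL_DLKM_DISABLE, but
+# TARGET_KERNEL_DLKM_DATARMNET_OVERRIDE can force the datarmnet modules on.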
+ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
+	ifeq ($(TARGET_KERNEL_DLKM_DATARMNET_OVERRIDE), true)
+		TARGET_DATARMNET_ENABLE := true
+	endif
+else
+	TARGET_DATARMNET_ENABLE := true
+endif
+
+ifeq ($(TARGET_DATARMNET_ENABLE), true)
+	#Build rmnet core
+	DATA_DLKM_BOARD_PLATFORMS_LIST := pineapple
+	DATA_DLKM_BOARD_PLATFORMS_LIST += blair
+	DATA_DLKM_BOARD_PLATFORMS_LIST += monaco
+	DATA_DLKM_BOARD_PLATFORMS_LIST += pitti
+	DATA_DLKM_BOARD_PLATFORMS_LIST += volcano
+
+	ifneq ($(TARGET_BOARD_AUTO),true)
+		ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
+			BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_core.ko
+			BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rmnet_ctl.ko
+		endif
+	endif
+endif

+ 2 - 0
qcom/opensource/datarmnet/datarmnet_dlkm_vendor_product.mk

@@ -0,0 +1,2 @@
+PRODUCT_PACKAGES += rmnet_core.ko
+PRODUCT_PACKAGES += rmnet_ctl.ko

+ 103 - 0
qcom/opensource/datarmnet/define_modules.bzl

@@ -0,0 +1,103 @@
+load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
+load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
+
+def define_modules(target, variant):
+    kernel_build_variant = "{}_{}".format(target, variant)
+    include_base = "../../../{}".format(native.package_name())
+
+    #The below will take care of the defconfig
+    #include_defconfig = ":{}_defconfig".format(variant)
+
+    ddk_module(
+        name = "{}_rmnet_ctl".format(kernel_build_variant),
+        out = "rmnet_ctl.ko",
+        srcs = [
+            "core/rmnet_ctl_ipa.c",
+            "core/rmnet_ctl.h",
+            "core/rmnet_ctl_client.h",
+        ],
+        kconfig = "core/Kconfig",
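+        # rmnet_ctl_ipa.c is always built; rmnet_ctl_client.c is added only
+        # for the SoC configs listed below.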
+        conditional_srcs = {
+            "CONFIG_ARCH_PINEAPPLE": {
+                True: [
+                    "core/rmnet_ctl_client.c",
+                ],
+            },
+            "CONFIG_ARCH_BLAIR": {
+                True: [
+                    "core/rmnet_ctl_client.c",
+                ],
+            },
+            "CONFIG_ARCH_MONACO": {
+                True: [
+                    "core/rmnet_ctl_client.c",
+                ],
+            },
+            "CONFIG_ARCH_PITTI": {
+                True: [
+                    "core/rmnet_ctl_client.c",
+                ],
+            },
+            "CONFIG_ARCH_VOLCANO": {
+                True: [
+                    "core/rmnet_ctl_client.c",
+                ],
+            },
+        },
+        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+        deps = [
+            "//vendor/qcom/opensource/dataipa:{}_ipam".format(kernel_build_variant),
+            "//msm-kernel:all_headers",
+            "//vendor/qcom/opensource/dataipa:include_headers",
+        ],
+    )
+
+    ddk_module(
+        name = "{}_rmnet_core".format(kernel_build_variant),
+        out = "rmnet_core.ko",
+        srcs = [
+            "core/rmnet_config.c",
+            "core/rmnet_descriptor.c",
+            "core/rmnet_genl.c",
+            "core/rmnet_handlers.c",
+            "core/rmnet_map_command.c",
+            "core/rmnet_map_data.c",
+            "core/rmnet_module.c",
+            "core/rmnet_vnd.c",
+            "core/dfc_qmap.c",
+            "core/dfc_qmi.c",
+            "core/qmi_rmnet.c",
+            "core/wda_qmi.c",
+            "core/rmnet_ll.c",
+            "core/rmnet_ll_ipa.c",
+            "core/rmnet_qmap.c",
+            "core/rmnet_ll_qmap.c",
+        ],
+        local_defines = [
+            "RMNET_TRACE_INCLUDE_PATH={}/core".format(include_base),
+        ],
+        kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
+        deps = [
+            ":rmnet_core_headers",
+            ":{}_rmnet_ctl".format(kernel_build_variant),
+            "//vendor/qcom/opensource/dataipa:{}_ipam".format(kernel_build_variant),
+            "//vendor/qcom/opensource/datarmnet-ext/mem:{}_rmnet_mem".format(kernel_build_variant),
+            "//msm-kernel:all_headers",
+            "//vendor/qcom/opensource/dataipa:include_headers",
+            "//vendor/qcom/opensource/datarmnet-ext/mem:rmnet_mem_headers",
+        ],
+    )
+
+    copy_to_dist_dir(
+        name = "{}_modules_dist".format(kernel_build_variant),
+        data = [
+            "{}_rmnet_core".format(kernel_build_variant),
+            "{}_rmnet_ctl".format(kernel_build_variant),
+        ],
+        dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
+        flat = True,
+        wipe_dist_dir = False,
+        allow_duplicate_filenames = False,
+        mode_overrides = {"**/*": "644"},
+        log = "info",
+    )