
Packets are now sent over a dedicated MHI channel when indicated by the DFC driver. The new dedicated channel is controlled by the rmnet driver. Buffers are allocated and supplied to it as needed from a recyclable pool for RX on the channel, and packets will be sent to it and freed manually once the channel indicates that they have been sent. Low latency packets can be aggregated like standard QMAP packets, but have their own aggregation state to prevent mixing default and low latency flows, and to allow each type of flow to use its own send function (i.e. dev_queue_xmit() versus rmnet_ll_send_skb()). Low latency packets also have their own load-balancing scheme, and do not need to use the SHS module for balancing. To facilitate this, we mark the low latency packets with a non-zero priority value upon receipt from the MHI channel and avoid sending any such marked packets to the SHS ingress hook. DFC has been updated with a new netlink message type to handle swapping a list of bearers from one channel to another. The actual swap is performed asynchronously, and separate netlink ACKs will be sent to the userspace socket when the switch has been completed. Change-Id: I93861d4b004f399ba203d76a71b2f01fa5c0d5d2 Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef _QMI_RMNET_H
|
|
#define _QMI_RMNET_H
|
|
|
|
#include <linux/netdevice.h>
|
|
#include <linux/skbuff.h>
|
|
#define CONFIG_QTI_QMI_RMNET 1
|
|
#define CONFIG_QTI_QMI_DFC 1
|
|
#define CONFIG_QTI_QMI_POWER_COLLAPSE 1
|
|
|
|
/*
 * struct qmi_rmnet_ps_ind - power save state change notification callbacks.
 *
 * @ps_on_handler:  invoked when power save mode is entered (see
 *                  qmi_rmnet_ps_on_notify()).
 * @ps_off_handler: invoked when power save mode is exited (see
 *                  qmi_rmnet_ps_off_notify()).
 * @list:           links this registration into the per-port notifier list
 *                  managed by qmi_rmnet_ps_ind_register()/_deregister().
 */
struct qmi_rmnet_ps_ind {
	void (*ps_on_handler)(void *port);
	void (*ps_off_handler)(void *port);
	struct list_head list;
};
|
|
|
|
|
|
#ifdef CONFIG_QTI_QMI_RMNET
|
|
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
|
|
void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
|
|
int attr_len);
|
|
void qmi_rmnet_enable_all_flows(struct net_device *dev);
|
|
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
|
|
#else
|
|
/* Stub for builds without CONFIG_QTI_QMI_RMNET: no QMI state to tear down. */
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_RMNET: link configuration
 * messages are silently ignored.
 */
static inline void
qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
		      int attr_len)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_RMNET: no flow-control state
 * exists, so there is nothing to enable.
 */
static inline void
qmi_rmnet_enable_all_flows(struct net_device *dev)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_RMNET: with no flow control
 * support, every flow is reported as enabled.
 */
static inline bool
qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
	return true;
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_QTI_QMI_DFC
|
|
void *qmi_rmnet_qos_init(struct net_device *real_dev,
|
|
struct net_device *vnd_dev, u8 mux_id);
|
|
void qmi_rmnet_qos_exit_pre(void *qos);
|
|
void qmi_rmnet_qos_exit_post(void);
|
|
bool qmi_rmnet_flow_is_low_latency(struct net_device *dev, int ip_type,
|
|
u32 mark);
|
|
void qmi_rmnet_burst_fc_check(struct net_device *dev,
|
|
int ip_type, u32 mark, unsigned int len);
|
|
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
|
|
#else
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: no QoS context is
 * allocated; callers receive (and must tolerate) a NULL handle.
 */
static inline void *
qmi_rmnet_qos_init(struct net_device *real_dev,
		   struct net_device *vnd_dev, u8 mux_id)
{
	return NULL;
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: nothing to release. */
static inline void qmi_rmnet_qos_exit_pre(void *qos)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: nothing to finalize. */
static inline void qmi_rmnet_qos_exit_post(void)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: no flow is ever classified
 * as low latency, so traffic stays on the default channel.
 */
static inline bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
						 int ip_type, u32 mark)
{
	return false;
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: burst flow-control
 * accounting is compiled out.
 */
static inline void
qmi_rmnet_burst_fc_check(struct net_device *dev,
			 int ip_type, u32 mark, unsigned int len)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_DFC: all traffic maps to TX
 * queue 0.
 */
static inline int qmi_rmnet_get_queue(struct net_device *dev,
				      struct sk_buff *skb)
{
	return 0;
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_QTI_QMI_POWER_COLLAPSE
|
|
int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable);
|
|
void qmi_rmnet_work_init(void *port);
|
|
void qmi_rmnet_work_exit(void *port);
|
|
void qmi_rmnet_work_maybe_restart(void *port);
|
|
void qmi_rmnet_set_dl_msg_active(void *port);
|
|
bool qmi_rmnet_ignore_grant(void *port);
|
|
|
|
int qmi_rmnet_ps_ind_register(void *port,
|
|
struct qmi_rmnet_ps_ind *ps_ind);
|
|
int qmi_rmnet_ps_ind_deregister(void *port,
|
|
struct qmi_rmnet_ps_ind *ps_ind);
|
|
void qmi_rmnet_ps_off_notify(void *port);
|
|
void qmi_rmnet_ps_on_notify(void *port);
|
|
|
|
#else
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: reports success
 * without changing any state.
 */
static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
{
	return 0;
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: no work queue
 * is created.
 */
static inline void qmi_rmnet_work_init(void *port)
{
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: no work queue
 * to destroy.
 */
static inline void qmi_rmnet_work_exit(void *port)
{
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: no deferred
 * work to restart.
 */
static inline void qmi_rmnet_work_maybe_restart(void *port)
{
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: downlink
 * message activity tracking is compiled out.
 */
static inline void qmi_rmnet_set_dl_msg_active(void *port)
{
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: grants are
 * never ignored.
 */
static inline bool qmi_rmnet_ignore_grant(void *port)
{
	return false;
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: registration
 * always "succeeds" but the callbacks are never invoked.
 *
 * Takes void *port to match the enabled-path declaration of
 * qmi_rmnet_ps_ind_register(); the previous struct rmnet_port * type is
 * not declared anywhere in this header. void * is compatible with all
 * existing callers.
 */
static inline int qmi_rmnet_ps_ind_register(void *port,
					    struct qmi_rmnet_ps_ind *ps_ind)
{
	return 0;
}
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: deregistration
 * always "succeeds"; there is no notifier list to remove from.
 *
 * Takes void *port to match the enabled-path declaration of
 * qmi_rmnet_ps_ind_deregister(); the previous struct rmnet_port * type
 * is not declared anywhere in this header. void * is compatible with
 * all existing callers.
 */
static inline int qmi_rmnet_ps_ind_deregister(void *port,
					      struct qmi_rmnet_ps_ind *ps_ind)
{
	return 0;
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: no registered
 * handlers exist, so power-save-off is not propagated.
 *
 * Takes void *port to match the enabled-path declaration of
 * qmi_rmnet_ps_off_notify(); the previous struct rmnet_port * type is
 * not declared anywhere in this header. void * is compatible with all
 * existing callers.
 */
static inline void qmi_rmnet_ps_off_notify(void *port)
{
}
|
|
|
|
/* Stub for builds without CONFIG_QTI_QMI_POWER_COLLAPSE: no registered
 * handlers exist, so power-save-on is not propagated.
 *
 * Takes void *port to match the enabled-path declaration of
 * qmi_rmnet_ps_on_notify(); the previous struct rmnet_port * type is
 * not declared anywhere in this header. void * is compatible with all
 * existing callers.
 */
static inline void qmi_rmnet_ps_on_notify(void *port)
{
}
|
|
#endif
|
|
#endif /*_QMI_RMNET_H*/
|