dfc: QMAP based powersave

Support DFC powersave using extended QMAP powersave command.

Change-Id: I3055212d8ecfe461bf9a6b35488269a59d4dec52
Acked-by: Weiyi Chen <weiyic@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
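For reviewers, the fragment below is a minimal sketch of how the pieces added in this change fit together; it condenses the logic of qmi_rmnet_check_stats_2() from the diff and omits the delayed-work/alarm plumbing, powersave notifications and error handling. The helper name ps_check_sketch() is illustrative only; every function it calls is introduced or exercised by this change.

/* Illustrative sketch, not part of the change: simplified view of the
 * periodic powersave decision made in qmi_rmnet_check_stats_2().
 */
static void ps_check_sketch(void *port, struct qmi_info *qmi)
{
	u8 bearer_id[32] = {0};
	u8 num_bearers = sizeof(bearer_id);

	if (qmi->ps_enabled) {
		/* Leave powersave: accept grants again, then tell the modem */
		qmi->ps_ignore_grant = false;
		if (!dfc_qmap_set_powersave(0, 0, NULL))
			qmi->ps_enabled = false;
		return;
	}

	/* No traffic observed: gather bearers that currently have no grant
	 * and request powersave (dfc_qmap_send_powersave() sets the
	 * allow/autoshut bits for this list).
	 */
	rmnet_lock_unlock_all_flows(port, true);
	rmnet_get_disabled_flows(port, &num_bearers, bearer_id);

	if (!dfc_qmap_set_powersave(1, num_bearers, bearer_id)) {
		/* Reset still-enabled bearers to the default grant and
		 * ignore modem grants while in powersave.
		 */
		rmnet_reset_enabled_flows(port);
		qmi->ps_ignore_grant = true;
		qmi->ps_enabled = true;
	}

	rmnet_lock_unlock_all_flows(port, false);
}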

core/dfc.h

@@ -349,6 +349,24 @@ TRACE_EVENT(dfc_ll_switch,
		  __entry->num_bearer, 1))
);

TRACE_EVENT(dfc_set_powersave_mode,

	TP_PROTO(int enable),

	TP_ARGS(enable),

	TP_STRUCT__entry(
		__field(int, enable)
	),

	TP_fast_assign(
		__entry->enable = enable;
	),

	TP_printk("set powersave mode to %s",
		  __entry->enable ? "enable" : "disable")
);

#endif /* _TRACE_DFC_H */

/* This part must be outside protection */

@@ -12,6 +12,7 @@
#include "dfc.h"

#define QMAP_DFC_VER 1
#define QMAP_PS_MAX_BEARERS 32

struct qmap_dfc_config {
	struct qmap_cmd_hdr hdr;

@@ -89,6 +90,22 @@ struct qmap_dfc_end_marker_cnf {
	u32 reserved4;
} __aligned(1);

struct qmap_dfc_powersave_req {
	struct qmap_cmd_hdr hdr;
	u8 cmd_ver;
	u8 allow:1;
	u8 autoshut:1;
	u8 reserved:6;
	u8 reserved2;
	u8 mode:1;
	u8 reserved3:7;
	__be32 ep_type;
	__be32 iface_id;
	u8 num_bearers;
	u8 bearer_id[QMAP_PS_MAX_BEARERS];
	u8 reserved4[3];
} __aligned(1);

static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
static struct dfc_qmi_data __rcu *qmap_dfc_data;

@@ -384,6 +401,65 @@ static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
	rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

static int dfc_qmap_send_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
	struct sk_buff *skb;
	struct qmap_dfc_powersave_req *dfc_powersave;
	unsigned int len = sizeof(struct qmap_dfc_powersave_req);
	struct dfc_qmi_data *dfc;
	u32 ep_type = 0;
	u32 iface_id = 0;

	rcu_read_lock();
	dfc = rcu_dereference(qmap_dfc_data);
	if (dfc) {
		ep_type = dfc->svc.ep_type;
		iface_id = dfc->svc.iface_id;
	} else {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->protocol = htons(ETH_P_MAP);
	dfc_powersave = (struct qmap_dfc_powersave_req *)skb_put(skb, len);
	memset(dfc_powersave, 0, len);

	dfc_powersave->hdr.cd_bit = 1;
	dfc_powersave->hdr.mux_id = 0;
	dfc_powersave->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
	dfc_powersave->hdr.cmd_name = QMAP_DFC_POWERSAVE;
	dfc_powersave->hdr.cmd_type = QMAP_CMD_REQUEST;
	dfc_powersave->hdr.tx_id = htonl(rmnet_qmap_next_txid());

	dfc_powersave->cmd_ver = 3;
	dfc_powersave->mode = enable ? 1 : 0;

	if (enable && num_bearers) {
		if (unlikely(num_bearers > QMAP_PS_MAX_BEARERS))
			num_bearers = QMAP_PS_MAX_BEARERS;
		dfc_powersave->allow = 1;
		dfc_powersave->autoshut = 1;
		dfc_powersave->num_bearers = num_bearers;
		memcpy(dfc_powersave->bearer_id, bearer_id, num_bearers);
	}

	dfc_powersave->ep_type = htonl(ep_type);
	dfc_powersave->iface_id = htonl(iface_id);

	return rmnet_qmap_send(skb, RMNET_CH_CTL, false);
}

int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id)
{
	trace_dfc_set_powersave_mode(enable);
	return dfc_qmap_send_powersave(enable, num_bearers, bearer_id);
}

void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
{
	struct rmnet_bearer_map *bearer;

@@ -429,8 +505,14 @@ int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,

	pr_info("DFC QMAP init\n");

	dfc_config_acked = false;
	dfc_qmap_send_config(data);
	/* Currently if powersave ext is enabled, no need to do dfc config
	 * which only enables tx_info */
	if (qmi->ps_ext) {
		dfc_config_acked = true;
	} else {
		dfc_config_acked = false;
		dfc_qmap_send_config(data);
	}

	return 0;
}

@@ -1119,9 +1119,18 @@ void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,

		spin_lock_bh(&qos->qos_lock);

		/* In powersave, change grant to 1 if it is a enable */
		if (qmi_rmnet_ignore_grant(dfc->rmnet_port)) {
			spin_unlock_bh(&qos->qos_lock);
			continue;
			if (flow_status->num_bytes) {
				flow_status->num_bytes = DEFAULT_GRANT;
				flow_status->seq_num = 0;
				/* below is to reset bytes-in-flight */
				flow_status->rx_bytes_valid = 1;
				flow_status->rx_bytes = 0xFFFFFFFF;
			} else {
				spin_unlock_bh(&qos->qos_lock);
				continue;
			}
		}

		if (unlikely(flow_status->bearer_id == 0xFF))

core/qmi_rmnet.c

@@ -37,6 +37,7 @@
#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
#define FLAG_QMAP_MASK 0x0020
#define FLAG_PS_EXT_MASK 0x0040

#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)

@@ -44,9 +45,11 @@
			((m) == DFC_MODE_SA)

#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
#define FLAG_TO_PS_EXT(f) ((f) & FLAG_PS_EXT_MASK)

int dfc_mode;
int dfc_qmap;
int dfc_ps_ext;

unsigned int rmnet_wq_frequency __read_mostly = 1000;

@@ -633,6 +636,7 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
	}

	qmi->flag = tcm->tcm_ifindex;
	qmi->ps_ext = FLAG_TO_PS_EXT(qmi->flag);
	svc.instance = tcm->tcm_handle;
	svc.ep_type = tcm->tcm_info;
	svc.iface_id = tcm->tcm_parent;

@@ -736,6 +740,7 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
	case NLMSG_CLIENT_SETUP:
		dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
		dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
		dfc_ps_ext = FLAG_TO_PS_EXT(tcm->tcm_ifindex);

		if (!DFC_SUPPORTED_MODE(dfc_mode) &&
		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))

@@ -893,6 +898,85 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
}
EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);

/**
 * qmi_rmnet_lock_unlock_all_flows - lock or unlock all bearers
 */
void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock)
{
	struct qos_info *qos;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	if (lock)
		spin_lock_bh(&qos->qos_lock);
	else
		spin_unlock_bh(&qos->qos_lock);
}
EXPORT_SYMBOL(qmi_rmnet_lock_unlock_all_flows);

/**
 * qmi_rmnet_get_disabled_flows - get disabled bearers
 * Needs to be called with qos_lock
 */
void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
				  u8 *bearer_id)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;
	u8 current_num_bearers = 0;
	u8 num_bearers_left = 0;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos || !num_bearers)
		return;

	num_bearers_left = *num_bearers;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (!bearer->grant_size && num_bearers_left) {
			if (bearer_id)
				bearer_id[current_num_bearers] =
					bearer->bearer_id;
			current_num_bearers++;
			num_bearers_left--;
		}
	}

	*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(qmi_rmnet_get_disabled_flows);

/**
 * qmi_rmnet_reset_enabled_flows - reset enabled bearers for powersave
 * Needs to be called with qos_lock
 */
void qmi_rmnet_reset_enabled_flows(struct net_device *dev)
{
	struct qos_info *qos;
	struct rmnet_bearer_map *bearer;

	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
	if (!qos)
		return;

	list_for_each_entry(bearer, &qos->bearer_head, list) {
		if (bearer->grant_size) {
			bearer->seq = 0;
			bearer->ack_req = 0;
			bearer->bytes_in_flight = 0;
			bearer->tcp_bidir = false;
			bearer->rat_switch = false;
			qmi_rmnet_watchdog_remove(bearer);
			bearer->grant_size = DEFAULT_GRANT;
			bearer->grant_thresh =
				qmi_rmnet_grant_per(DEFAULT_GRANT);
		}
	}
}
EXPORT_SYMBOL(qmi_rmnet_reset_enabled_flows);

#ifdef CONFIG_QTI_QMI_DFC
bool qmi_rmnet_flow_is_low_latency(struct net_device *dev,
				   struct sk_buff *skb)

@@ -1088,6 +1172,7 @@ static struct rmnet_powersave_work *rmnet_work;
static bool rmnet_work_quit;
static bool rmnet_work_inited;
static LIST_HEAD(ps_list);
static u8 ps_bearer_id[32];

struct rmnet_powersave_work {
	struct delayed_work work;

@@ -1276,6 +1361,81 @@ end:
	rcu_read_unlock();
}

static void qmi_rmnet_check_stats_2(struct work_struct *work)
{
	struct rmnet_powersave_work *real_work;
	struct qmi_info *qmi;
	u64 rxd, txd;
	u64 rx, tx;
	u8 num_bearers;

	real_work = container_of(to_delayed_work(work),
				 struct rmnet_powersave_work, work);

	if (unlikely(!real_work->port))
		return;

	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
	if (unlikely(!qmi))
		return;

	if (qmi->ps_enabled) {

		/* Ready to accept grant */
		qmi->ps_ignore_grant = false;

		/* Out of powersave */
		if (dfc_qmap_set_powersave(0, 0, NULL))
			goto end;

		qmi->ps_enabled = false;

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_off_notify(real_work->port);

		goto end;
	}

	rmnet_get_packets(real_work->port, &rx, &tx);
	rxd = rx - real_work->old_rx_pkts;
	txd = tx - real_work->old_tx_pkts;
	real_work->old_rx_pkts = rx;
	real_work->old_tx_pkts = tx;

	if (!rxd && !txd) {
		rmnet_lock_unlock_all_flows(real_work->port, true);

		num_bearers = sizeof(ps_bearer_id);
		memset(ps_bearer_id, 0, sizeof(ps_bearer_id));
		rmnet_get_disabled_flows(real_work->port, &num_bearers,
					 ps_bearer_id);

		/* Enter powersave */
		if (dfc_qmap_set_powersave(1, num_bearers, ps_bearer_id)) {
			rmnet_lock_unlock_all_flows(real_work->port, false);
			goto end;
		}

		rmnet_reset_enabled_flows(real_work->port);
		qmi->ps_ignore_grant = true;
		qmi->ps_enabled = true;
		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);

		rmnet_lock_unlock_all_flows(real_work->port, false);

		if (rmnet_get_powersave_notif(real_work->port))
			qmi_rmnet_ps_on_notify(real_work->port);

		return;
	}
end:
	rcu_read_lock();
	if (!rmnet_work_quit) {
		alarm_start_relative(&real_work->atimer, PS_INTERVAL_KT);
	}
	rcu_read_unlock();
}

static void qmi_rmnet_work_set_active(void *port, int status)
{
	struct qmi_info *qmi;

@@ -1307,15 +1467,20 @@ void qmi_rmnet_work_init(void *port)
		rmnet_ps_wq = NULL;
		return;
	}
	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);

	if (dfc_qmap && dfc_ps_ext)
		INIT_DEFERRABLE_WORK(&rmnet_work->work,
				     qmi_rmnet_check_stats_2);
	else
		INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);

	alarm_init(&rmnet_work->atimer, ALARM_BOOTTIME, qmi_rmnet_work_alarm);
	rmnet_work->port = port;
	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
			  &rmnet_work->old_tx_pkts);

	rmnet_work_quit = false;
	qmi_rmnet_work_set_active(rmnet_work->port, 1);
	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
	qmi_rmnet_work_set_active(rmnet_work->port, 0);
	rmnet_work_inited = true;
}
EXPORT_SYMBOL(qmi_rmnet_work_init);

@@ -1328,8 +1493,10 @@ void qmi_rmnet_work_maybe_restart(void *port)
	if (unlikely(!qmi || !rmnet_work_inited))
		return;

	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active)) {
		qmi->ps_ignore_grant = false;
		qmi_rmnet_work_restart(port);
	}
}
EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);

@@ -33,6 +33,10 @@ int qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt,
			  int attr_len);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev, bool lock);
void qmi_rmnet_get_disabled_flows(struct net_device *dev, u8 *num_bearers,
				  u8 *bearer_id);
void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{

@@ -55,6 +59,20 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
{
	return true;
}

static inline void qmi_rmnet_lock_unlock_all_flows(struct net_device *dev,
						   bool lock)
{
}

static inline void qmi_rmnet_get_disabled_flows(struct net_device *dev,
						u8 *num_bearers, u8 *bearer_id)
{
	if (num_bearers)
		*num_bearers = 0;
}

static void qmi_rmnet_reset_enabled_flows(struct net_device *dev);
#endif

#ifdef CONFIG_QTI_QMI_DFC

@@ -139,6 +139,7 @@ struct qmi_info {
	bool ps_enabled;
	bool dl_msg_active;
	bool ps_ignore_grant;
	int ps_ext;
};

enum data_ep_type_enum_v01 {

@@ -267,6 +268,7 @@ void wda_qmi_client_exit(void *wda_data);
int wda_set_powersave_mode(void *wda_data, u8 enable);
void qmi_rmnet_flush_ps_wq(void);
void wda_qmi_client_release(void *wda_data);
int dfc_qmap_set_powersave(u8 enable, u8 num_bearers, u8 *bearer_id);
#else
static inline int
wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)

@@ -755,6 +755,70 @@ out:
}
EXPORT_SYMBOL(rmnet_all_flows_enabled);

void rmnet_lock_unlock_all_flows(void *port, bool lock)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_lock_unlock_all_flows(ep->egress_dev, lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_lock_unlock_all_flows);

void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;
	u8 current_num_bearers = 0;
	u8 number_bearers_left = 0;
	u8 num_bearers_in_out;

	if (unlikely(!port || !num_bearers))
		return;

	number_bearers_left = *num_bearers;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		num_bearers_in_out = number_bearers_left;
		qmi_rmnet_get_disabled_flows(ep->egress_dev,
					     &num_bearers_in_out,
					     bearer_id ? bearer_id +
					     current_num_bearers : NULL);
		current_num_bearers += num_bearers_in_out;
		number_bearers_left -= num_bearers_in_out;
	}
	rcu_read_unlock();

	*num_bearers = current_num_bearers;
}
EXPORT_SYMBOL(rmnet_get_disabled_flows);

void rmnet_reset_enabled_flows(void *port)
{
	struct rmnet_endpoint *ep;
	unsigned long bkt;

	if (unlikely(!port))
		return;

	rcu_read_lock();
	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
			  bkt, ep, hlnode) {
		qmi_rmnet_reset_enabled_flows(ep->egress_dev);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_reset_enabled_flows);

int rmnet_get_powersave_notif(void *port)
{
	if (!port)

@@ -69,6 +69,7 @@ static void rmnet_qmap_cmd_handler(struct sk_buff *skb)
	case QMAP_DFC_IND:
	case QMAP_DFC_QUERY:
	case QMAP_DFC_END_MARKER:
	case QMAP_DFC_POWERSAVE:
		rc = dfc_qmap_cmd_handler(skb);
		break;

@@ -55,6 +55,7 @@ struct net_device *rmnet_qmap_get_dev(u8 mux_id);
#define QMAP_DFC_IND 11
#define QMAP_DFC_QUERY 12
#define QMAP_DFC_END_MARKER 13
#define QMAP_DFC_POWERSAVE 14
int dfc_qmap_cmd_handler(struct sk_buff *skb);

#define QMAP_LL_SWITCH 25

@@ -35,6 +35,9 @@ void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
struct net_device *rmnet_get_real_dev(void *port);
int rmnet_get_dlmarker_info(void *port);
void rmnet_lock_unlock_all_flows(void *port, bool lock);
void rmnet_get_disabled_flows(void *port, u8 *num_bearers, u8 *bearer_id);
void rmnet_reset_enabled_flows(void *port);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{